Example #1
    def get_student_info_path_and_context(self, data):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        """
        student_id = data.params.get('student_id', '')
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission = submissions[0]
                submission_uuid = submissions[0]['uuid']

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #2
    def get_student_info_path_and_context(self, data):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        """
        student_id = data.params.get('student_id', '')
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission = submissions[0]
                submission_uuid = submissions[0]['uuid']

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)
        else:
            peer_assessments = []
            submitted_assessments = []

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria),
        }

        if peer_assessments or self_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #3
    def get_student_info_path_and_context(self, data):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        """
        student_id = data.params.get("student_id", "")
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item["student_id"] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission = submissions[0]
                submission_uuid = submissions[0]["uuid"]

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)
        else:
            peer_assessments = []
            submitted_assessments = []

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        context = {
            "submission": submission,
            "peer_assessments": peer_assessments,
            "submitted_assessments": submitted_assessments,
            "self_assessment": self_assessment,
            "rubric_criteria": copy.deepcopy(self.rubric_criteria),
        }

        if peer_assessments or self_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = "openassessmentblock/staff_debug/student_info.html"
        return path, context
Example #4
    def test_get_max_scores(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
        assessment = peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            1
        )

        max_scores = peer_api.get_rubric_max_scores(sub["uuid"])
        self.assertEqual(max_scores['secret'], 1)
        self.assertEqual(max_scores['giveup'], 10)
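
Every example in this collection revolves around the same call: peer_api.get_rubric_max_scores(submission_uuid) returns a dict mapping each rubric criterion name to its maximum attainable points (the test above checks 'secret' -> 1 and 'giveup' -> 10), and the rendering code copies the rubric criteria and attaches that maximum as "total_value". The condensed sketch below illustrates that recurring pattern; it reuses the import path shown in Examples #10, #12 and #17, but the helper function itself is illustrative and not taken from the source.

import copy

# Import path as shown in Examples #10, #12 and #17.
from openassessment.assessment.api import peer as peer_api


def annotate_criteria_with_max_scores(rubric_criteria, submission_uuid):
    """Deep-copy the rubric criteria and attach each criterion's max score as 'total_value'."""
    criteria = copy.deepcopy(rubric_criteria)
    max_scores = peer_api.get_rubric_max_scores(submission_uuid)
    for criterion in criteria:
        # .get() with a blank default mirrors the defensive lookups in Examples #7 and #15,
        # where staff-created assessments may reference criteria that no longer match
        # the current problem definition.
        criterion["total_value"] = max_scores.get(criterion["name"], '')
    return criteria
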
Example #5
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        assessment_steps = self.assessment_steps

        example_based_assessment = None
        example_based_assessment_grade_context = None

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(self_assessment)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
            if grade_exists:
                example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                example_based_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

        context.update({
            'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
        })

        if peer_assessments or self_assessment or example_based_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #6
    def get_student_info_path_and_context(self, student_id):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_id (unicode): The ID of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission_uuid = submissions[0]['uuid']
                submission = submissions[0]

                if 'file_key' in submission.get('answer', {}):
                    file_key = submission['answer']['file_key']

                    try:
                        submission['image_url'] = file_api.get_download_url(
                            file_key)
                    except file_api.FileUploadError:
                        # Log the error, but do not prevent the rest of the student info
                        # from being displayed.
                        msg = (
                            u"Could not retrieve image URL for staff debug page.  "
                            u"The student ID is '{student_id}', and the file key is {file_key}"
                        ).format(student_id=student_id, file_key=file_key)
                        logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(
                submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(
                submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #7
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid)
            )

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'allow_latex': self.allow_latex,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(criterion["name"], '')
                criterion["total_value"] = max_scores.get(criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #8
    def get_student_info_path_and_context(self, student_username):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_username (unicode): The username of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps
        anonymous_user_id = None
        submissions = None
        student_item = None

        if student_username:
            anonymous_user_id = self.get_anonymous_user_id(student_username, self.course_id)
            student_item = self.get_student_item_dict(anonymous_user_id=anonymous_user_id)

        if anonymous_user_id:
            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

        if submissions:
            submission_uuid = submissions[0]['uuid']
            submission = submissions[0]

            if 'file_key' in submission.get('answer', {}):
                file_key = submission['answer']['file_key']

                try:
                    submission['image_url'] = file_api.get_download_url(file_key)
                except file_api.FileUploadError:
                    # Log the error, but do not prevent the rest of the student info
                    # from being displayed.
                    msg = (
                        u"Could not retrieve image URL for staff debug page.  "
                        u"The student username is '{student_username}', and the file key is {file_key}"
                    ).format(student_username=student_username, file_key=file_key)
                    logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        workflow_cancellation = workflow_api.get_assessment_workflow_cancellation(submission_uuid)
        if workflow_cancellation:
            workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])

        context = {
            'submission': submission,
            'workflow_cancellation': workflow_cancellation,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #9
    def handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError(
                "This command requires two arguments: <course_id> <username>")

        course_id, username, = args
        # Check args: course_id
        try:
            course_id = CourseLocator.from_string(course_id)
        except InvalidKeyError:
            raise CommandError(
                "The course_id is not of the right format. It should be like 'org/course/run' or 'course-v1:org+course+run'"
            )

        # Find course
        course_items = modulestore().get_items(
            course_id, qualifiers={'category': 'course'})
        if not course_items:
            raise CommandError("No such course was found.")

        # Find openassessment items
        oa_items = modulestore().get_items(
            course_id, qualifiers={'category': 'openassessment'})
        if not oa_items:
            raise CommandError("No openassessment item was found.")
        oa_items = sorted(
            oa_items,
            key=lambda item: item.start or datetime(2030, 1, 1, tzinfo=UTC()))
        print "Openassessment item(s):"
        oa_output = PrettyTable(['#', 'Item ID', 'Title'])
        oa_output.align = 'l'
        for i, oa_item in enumerate(oa_items):
            row = []
            row.append(i)
            row.append(oa_item.location)
            row.append(oa_item.title)
            oa_output.add_row(row)
        print oa_output
        while True:
            try:
                selected = raw_input(
                    "Choose an openassessment item # (empty to cancel): ")
                if selected == '':
                    print "Cancelled."
                    return
                selected = int(selected)
                oa_item = oa_items[selected]
                break
            except (IndexError, ValueError):
                print "WARN: Invalid number was detected. Choose again."
                continue

        item_location = oa_item.location

        # Get student_id from username
        # TODO: courseenrollment parameters can be used by only lms?
        students = User.objects.filter(username=username,
                                       is_active=True,
                                       courseenrollment__course_id=course_id,
                                       courseenrollment__is_active=True)
        if not students:
            raise CommandError("No such user was found.")
        student = students[0]
        anonymous_student_id = anonymous_id_for_user(student, course_id)

        # Get submission from student_id, course_id and item_location
        submission = get_submission(course_id, item_location,
                                    anonymous_student_id)

        # Print summary
        print_summary(course_id, oa_item, anonymous_student_id)

        while True:
            print "[0] Show the user's submission again."
            print "[1] Toggle the `scored` flag in the peer-assessment record."
            print "[2] Create a new peer-assessment record to the users."
            resp = raw_input("Choose an operation (empty to cancel): ")

            if resp == '0':
                print_summary(course_id, oa_item, anonymous_student_id)

            elif resp == '1':
                while True:
                    try:
                        selected_item_id = raw_input(
                            "Please input PeerWorkflowItem ID to toggle the `scored` flag (empty to cancel): "
                        )
                        if selected_item_id == '':
                            print "Cancelled."
                            break
                        selected_item_id = int(selected_item_id)
                        selected_item = PeerWorkflowItem.objects.filter(
                            id=selected_item_id,
                            author=submission.id,
                            submission_uuid=submission.submission_uuid,
                            assessment__isnull=False)[0]
                    except (IndexError, ValueError):
                        print "WARN: Invalid ID was detected. Input again."
                        continue
                    # Update PeerWorkflowItem (assessment_peerworkflowitem record)
                    selected_item.scored = not selected_item.scored
                    selected_item.save()
                    # Update Score (submissions_score record)
                    latest_score = get_latest_score(submission)
                    if latest_score is not None:
                        max_scores = peer_api.get_rubric_max_scores(
                            submission.submission_uuid)
                        try:
                            median_scores = peer_api.get_assessment_median_scores(
                                submission.submission_uuid)
                        except:
                            median_scores = {}
                        sub_api.set_score(submission.submission_uuid,
                                          sum(median_scores.values()),
                                          sum(max_scores.values()))
                        #latest_score.points_earned = sum(median_scores.values())
                        #latest_score.created_at = now()
                        #latest_score.save()
                    # Update status of AssessmentWorkflow (workflow_assessmentworkflow record)
                    get_workflow_info(submission.submission_uuid, oa_item)

                    # Print summary
                    print_summary(course_id, oa_item, anonymous_student_id)

            elif resp == '2':
                while True:
                    staff_username = raw_input(
                        "Please input username to be given a new peer-assessment item (empty to cancel): "
                    )
                    if staff_username == '':
                        print "Cancelled."
                        break
                    # TODO: courseenrollment parameters can be used by only lms?
                    staffs = User.objects.filter(
                        username=staff_username,
                        is_active=True,
                        courseenrollment__course_id=course_id,
                        courseenrollment__is_active=True)
                    if not staffs:
                        print "WARN: No such user was found in the course. Input again."
                        continue
                    staff = staffs[0]
                    anonymous_staff_id = anonymous_id_for_user(
                        staff, course_id)
                    staff_submissions = PeerWorkflow.objects.filter(
                        course_id=course_id,
                        item_id=item_location,
                        student_id=anonymous_staff_id)
                    if not staff_submissions:
                        print "WARN: This user hasn't posted any submission in this openassessment item yet. Input again."
                        continue
                    staff_submission = staff_submissions[0]
                    # Check if this user has already assessed the requested submission
                    items_assessed_by_staff = PeerWorkflowItem.objects.filter(
                        scorer=staff_submission,
                        author=submission,
                        submission_uuid=submission.submission_uuid)
                    if len(items_assessed_by_staff) > 0:
                        print "WARN: This user has already assessed the requested submission. Input again."
                        continue
                    print "Staff submission:"
                    print_submission(staff_submission, oa_item)

                    while True:
                        resp = raw_input("Is this right? (y/n): ")
                        if resp.lower() == 'y':
                            new_items = PeerWorkflowItem.objects.filter(
                                scorer_id=staff_submission.id,
                                assessment__isnull=True).order_by(
                                    '-started_at')
                            if new_items:
                                # Replace the author and submission_uuid
                                new_item = new_items[0]
                                new_item.author = submission
                                new_item.submission_uuid = submission.submission_uuid
                                new_item.started_at = now()
                            else:
                                new_item = PeerWorkflowItem.objects.create(
                                    scorer=staff_submission,
                                    author=submission,
                                    submission_uuid=submission.submission_uuid,
                                    started_at=now())
                            new_item.save()
                            print "Create a new peer-assessment record to %s successfully!" % staff.username
                            break
                        elif resp.lower() == 'n':
                            break
                        else:
                            continue

            elif resp == '':
                print "Cancelled."
                break
            else:
                print "WARN: Invalid number was detected. Choose again."
                continue
Example #10
    def grade_details(self,
                      submission_uuid,
                      peer_assessments,
                      self_assessment,
                      staff_assessment,
                      is_staff=False):
        """
        Returns details about the grade assigned to the submission.

        Args:
            submission_uuid (str): The id of the submission being graded.
            peer_assessments (list of dict): Serialized assessment models from the peer API.
            self_assessment (dict): Serialized assessment model from the self API
            staff_assessment (dict): Serialized assessment model from the staff API
            is_staff (bool): True if the grade details are being displayed to staff, else False.
                Default value is False (meaning grade details are being shown to the learner).

        Returns:
            A dictionary with full details about the submission's grade.

        Example:
            {
                criteria: [{
                    'label': 'Test name',
                    'name': 'f78ac7d4ca1e4134b0ba4b40ca212e72',
                    'prompt': 'Test prompt',
                    'order_num': 2,
                    'options': [...]
                    'feedback': [
                        'Good job!',
                        'Excellent work!',
                    ]
                }],
                additional_feedback: [{
                }]
                ...
            }
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api

        criteria = copy.deepcopy(self.rubric_criteria_with_labels)

        def has_feedback(assessments):
            """
            Returns True if at least one assessment has feedback.

            Args:
                assessments: A list of assessments

            Returns:
                Returns True if at least one assessment has feedback.
            """
            return any((assessment and (
                assessment.get('feedback', None)
                or has_feedback(assessment.get('individual_assessments', []))))
                       for assessment in assessments)

        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        assessment_steps = self.assessment_steps
        if staff_assessment:
            median_scores = staff_api.get_assessment_scores_by_criteria(
                submission_uuid)
        elif "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(
                submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(
                submission_uuid)

        for criterion in criteria:
            criterion_name = criterion['name']

            # Record assessment info for the current criterion
            criterion['assessments'] = self._graded_assessments(
                submission_uuid,
                criterion,
                assessment_steps,
                staff_assessment,
                peer_assessments,
                self_assessment,
                is_staff=is_staff,
            )

            # Record whether there is any feedback provided in the assessments
            criterion['has_feedback'] = has_feedback(criterion['assessments'])

            # Although we prevent course authors from modifying criteria post-release,
            # it's still possible for assessments created by course staff to
            # have criteria that differ from the current problem definition.
            # It's also possible to circumvent the post-release restriction
            # if course authors directly import a course into Studio.
            # If this happens, we simply leave the score blank so that the grade
            # section can render without error.
            criterion['median_score'] = median_scores.get(criterion_name, '')
            criterion['total_value'] = max_scores.get(criterion_name, '')

        return {
            'criteria': criteria,
            'additional_feedback': self._additional_feedback(
                staff_assessment=staff_assessment,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
            ),
        }
Example #11
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = peer_api.get_assessments(submission_uuid)
            has_submitted_feedback = feedback is not None
        else:
            feedback = None
            peer_assessments = []
            has_submitted_feedback = False

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #12
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api

        assessment_steps = self.assessment_steps

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"
        grade_utils = self.runtime._services.get('grade_utils')

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(self_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

        context.update({
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
            'are_grades_frozen': grade_utils.are_grades_frozen()
        })

        if peer_assessments or self_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #13
    def test_max_score_db_error(self, mock_filter):
        mock_filter.side_effect = DatabaseError("Bad things happened")
        tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
        peer_api.get_rubric_max_scores(tim["uuid"])
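
Example #13 exercises only the failure path: the underlying query is patched to raise DatabaseError, so get_rubric_max_scores is expected to fail rather than return partial data. The minimal sketch below shows how a caller might tolerate such a failure; the broad except mirrors the fallback that Examples #9 and #14 use around get_assessment_median_scores, and the helper name and logger are illustrative assumptions, not anything in the source.

import logging

from openassessment.assessment.api import peer as peer_api

logger = logging.getLogger(__name__)  # illustrative logger, not from the source


def rubric_max_scores_or_empty(submission_uuid):
    """Return the rubric max scores, or an empty dict if the lookup fails."""
    try:
        return peer_api.get_rubric_max_scores(submission_uuid)
    except Exception:  # broad fallback, in the spirit of Examples #9 and #14
        logger.exception("Could not retrieve rubric max scores for submission %s", submission_uuid)
        return {}
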
Example #14
    def handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError("This command requires two arguments: <course_id> <username>")

        course_id, username, = args
        # Check args: course_id
        try:
            course_id = CourseLocator.from_string(course_id)
        except InvalidKeyError:
            raise CommandError("The course_id is not of the right format. It should be like 'org/course/run' or 'course-v1:org+course+run'")

        # Find course
        course_items = modulestore().get_items(course_id, qualifiers={'category': 'course'})
        if not course_items:
            raise CommandError("No such course was found.")

        # Find openassessment items
        oa_items = modulestore().get_items(course_id, qualifiers={'category': 'openassessment'})
        if not oa_items:
            raise CommandError("No openassessment item was found.")
        oa_items = sorted(oa_items, key=lambda item:item.start or datetime(2030, 1, 1, tzinfo=UTC()))
        print "Openassessment item(s):"
        oa_output = PrettyTable(['#', 'Item ID', 'Title'])
        oa_output.align = 'l'
        for i, oa_item in enumerate(oa_items):
            row = []
            row.append(i)
            row.append(oa_item.location)
            row.append(oa_item.title)
            oa_output.add_row(row)
        print oa_output
        while True:
            try:
                selected = raw_input("Choose an openassessment item # (empty to cancel): ")
                if selected == '':
                    print "Cancelled."
                    return
                selected = int(selected)
                oa_item = oa_items[selected]
                break
            except (IndexError, ValueError):
                print "WARN: Invalid number was detected. Choose again."
                continue

        item_location = oa_item.location

        # Get student_id from username
        # TODO: courseenrollment parameters can be used by only lms?
        students = User.objects.filter(username=username, is_active=True, courseenrollment__course_id=course_id, courseenrollment__is_active=True)
        if not students:
            raise CommandError("No such user was found.")
        student = students[0]
        anonymous_student_id = anonymous_id_for_user(student, course_id)

        # Get submission from student_id, course_id and item_location
        submission = get_submission(course_id, item_location, anonymous_student_id)

        # Print summary
        print_summary(course_id, oa_item, anonymous_student_id)

        while True:
            print "[0] Show the user's submission again."
            print "[1] Toggle the `scored` flag in the peer-assessment record."
            print "[2] Create a new peer-assessment record to the users."
            resp = raw_input("Choose an operation (empty to cancel): ")

            if resp == '0':
                print_summary(course_id, oa_item, anonymous_student_id)

            elif resp == '1':
                while True:
                    try:
                        selected_item_id = raw_input("Please input PeerWorkflowItem ID to toggle the `scored` flag (empty to cancel): ")
                        if selected_item_id == '':
                            print "Cancelled."
                            break
                        selected_item_id = int(selected_item_id)
                        selected_item = PeerWorkflowItem.objects.filter(id=selected_item_id, author=submission.id, submission_uuid=submission.submission_uuid, assessment__isnull=False)[0]
                    except (IndexError, ValueError):
                        print "WARN: Invalid ID was detected. Input again."
                        continue
                    # Update PeerWorkflowItem (assessment_peerworkflowitem record)
                    selected_item.scored = not selected_item.scored
                    selected_item.save()
                    # Update Score (submissions_score record)
                    latest_score = get_latest_score(submission)
                    if latest_score is not None:
                        max_scores = peer_api.get_rubric_max_scores(submission.submission_uuid)
                        try:
                            median_scores = peer_api.get_assessment_median_scores(submission.submission_uuid)
                        except:
                            median_scores = {}
                        sub_api.set_score(submission.submission_uuid, sum(median_scores.values()), sum(max_scores.values()))
                        #latest_score.points_earned = sum(median_scores.values())
                        #latest_score.created_at = now()
                        #latest_score.save()
                    # Update status of AssessmentWorkflow (workflow_assessmentworkflow record)
                    get_workflow_info(submission.submission_uuid, oa_item)

                    # Print summary
                    print_summary(course_id, oa_item, anonymous_student_id)

            elif resp == '2':
                while True:
                    staff_username = raw_input("Please input username to be given a new peer-assessment item (empty to cancel): ")
                    if staff_username == '':
                        print "Cancelled."
                        break
                    # TODO: courseenrollment parameters can be used by only lms?
                    staffs = User.objects.filter(username=staff_username, is_active=True, courseenrollment__course_id=course_id, courseenrollment__is_active=True)
                    if not staffs:
                        print "WARN: No such user was found in the course. Input again."
                        continue
                    staff = staffs[0]
                    anonymous_staff_id = anonymous_id_for_user(staff, course_id)
                    staff_submissions = PeerWorkflow.objects.filter(course_id=course_id, item_id=item_location, student_id=anonymous_staff_id)
                    if not staff_submissions:
                        print "WARN: This user hasn't posted any submission in this openassessment item yet. Input again."
                        continue
                    staff_submission = staff_submissions[0]
                    # Check if this user has already assessed the requested submission
                    items_assessed_by_staff = PeerWorkflowItem.objects.filter(
                        scorer=staff_submission,
                        author=submission,
                        submission_uuid=submission.submission_uuid
                    )
                    if len(items_assessed_by_staff) > 0:
                        print "WARN: This user has already assessed the requested submission. Input again."
                        continue
                    print "Staff submission:"
                    print_submission(staff_submission, oa_item)

                    while True:
                        resp = raw_input("Is this right? (y/n): ")
                        if resp.lower() == 'y':
                            new_items = PeerWorkflowItem.objects.filter(scorer_id=staff_submission.id, assessment__isnull=True).order_by('-started_at')
                            if new_items:
                                # Replace the author and submission_uuid
                                new_item = new_items[0]
                                new_item.author = submission
                                new_item.submission_uuid = submission.submission_uuid
                                new_item.started_at = now()
                            else:
                                new_item = PeerWorkflowItem.objects.create(
                                    scorer=staff_submission,
                                    author=submission,
                                    submission_uuid=submission.submission_uuid,
                                    started_at=now()
                                )
                            new_item.save()
                            print "Create a new peer-assessment record to %s successfully!" % staff.username
                            break
                        elif resp.lower() == 'n':
                            break
                        else:
                            continue

            elif resp == '':
                print "Cancelled."
                break
            else:
                print "WARN: Invalid number was detected. Choose again."
                continue
Example #15
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid))

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid))

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(
                submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(
                submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(
                submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(
                    criterion["name"], '')
                criterion["total_value"] = max_scores.get(
                    criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #16
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        assessment_steps = self.assessment_steps

        example_based_assessment = None
        example_based_assessment_grade_context = None

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(self_assessment)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
            if grade_exists:
                example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                example_based_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

        context.update({
            'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
        })

        if peer_assessments or self_assessment or example_based_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #17
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api

        assessment_steps = self.assessment_steps

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(
            submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"
        grade_utils = self.runtime._services.get('grade_utils')  # pylint: disable=protected-access

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(
                submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid,
                                   self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(
                    self_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(
                    staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(
            workflow['submission_uuid'])

        context.update({
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
            'are_grades_frozen': grade_utils.are_grades_frozen() if grade_utils else None,
        })

        if peer_assessments or self_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #18
    def get_student_info_path_and_context(self, student_id):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_id (unicode): The ID of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps
        student_item = self.get_student_item_dict()
        scores = {}
        problem_closed = None

        if student_id:
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission_uuid = submissions[0]['uuid']
                submission = submissions[0]

                if 'file_key' in submission.get('answer', {}):
                    file_key = submission['answer']['file_key']

                    try:
                        submission['image_url'] = file_api.get_download_url(file_key)
                    except file_api.FileUploadError:
                        # Log the error, but do not prevent the rest of the student info
                        # from being displayed.
                        msg = (
                            u"Could not retrieve image URL for staff debug page.  "
                            u"The student ID is '{student_id}', and the file key is {file_key}"
                        ).format(student_id=student_id, file_key=file_key)
                        logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

            # Get the data we need for instructor override of the student's score
            rubric_dict = create_rubric_dict(self.prompt, self.rubric_criteria_with_labels)
            scores = peer_api.get_data_for_override_score(
                submission_uuid,
                student_item,
                rubric_dict,
            )
            problem_closed, dummy0, dummy1, dummy2 = self.is_closed(step='peer-assessment', course_staff=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
            'scores': scores,
            'problem_closed': problem_closed,
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
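Example #18 also shows fail-soft handling of uploaded files: if fetching the download URL raises, the error is logged and the rest of the student info still renders. A standalone sketch of that pattern, with a stand-in fetch function in place of file_api.get_download_url:

    import logging

    logger = logging.getLogger(__name__)

    def get_download_url(file_key):
        # Stand-in for file_api.get_download_url; always fails in this sketch.
        raise RuntimeError("storage backend unavailable")

    submission = {'answer': {'file_key': 'abc123'}}

    if 'file_key' in submission.get('answer', {}):
        file_key = submission['answer']['file_key']
        try:
            submission['image_url'] = get_download_url(file_key)
        except RuntimeError:
            # Log, but keep building the page without the image.
            logger.exception("Could not retrieve image URL for file key %s", file_key)

    print(submission.get('image_url'))   # None; the rest of the context is unaffected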
Example #19
    def grade_details(
            self, submission_uuid, peer_assessments, self_assessment, staff_assessment,
            is_staff=False
    ):
        """
        Returns details about the grade assigned to the submission.

        Args:
            submission_uuid (str): The id of the submission being graded.
            peer_assessments (list of dict): Serialized assessment models from the peer API.
            self_assessment (dict): Serialized assessment model from the self API
            staff_assessment (dict): Serialized assessment model from the staff API
            is_staff (bool): True if the grade details are being displayed to staff, else False.
                Default value is False (meaning grade details are being shown to the learner).

        Returns:
            A dictionary with full details about the submission's grade.

        Example:
            {
                criteria: [{
                    'label': 'Test name',
                    'name': 'f78ac7d4ca1e4134b0ba4b40ca212e72',
                    'prompt': 'Test prompt',
                    'order_num': 2,
                    'options': [...],
                    'feedback': [
                        'Good job!',
                        'Excellent work!',
                    ]
                }],
                additional_feedback: [{
                }]
                ...
            }
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api

        criteria = copy.deepcopy(self.rubric_criteria_with_labels)

        def has_feedback(assessments):
            """
            Returns True if at least one assessment has feedback.

            Args:
                assessments: A list of assessments

            Returns:
                Returns True if at least one assessment has feedback.
            """
            return any(
                (
                    assessment and
                    (assessment.get('feedback', None) or has_feedback(assessment.get('individual_assessments', [])))
                )
                for assessment in assessments
            )

        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        assessment_steps = self.assessment_steps
        if staff_assessment:
            median_scores = staff_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)

        for criterion in criteria:
            criterion_name = criterion['name']

            # Record assessment info for the current criterion
            criterion['assessments'] = self._graded_assessments(
                submission_uuid, criterion,
                assessment_steps,
                staff_assessment,
                peer_assessments,
                self_assessment,
                is_staff=is_staff,
            )

            # Record whether there is any feedback provided in the assessments
            criterion['has_feedback'] = has_feedback(criterion['assessments'])

            # Although we prevent course authors from modifying criteria post-release,
            # it's still possible for assessments created by course staff to
            # have criteria that differ from the current problem definition.
            # It's also possible to circumvent the post-release restriction
            # if course authors directly import a course into Studio.
            # If this happens, we simply leave the score blank so that the grade
            # section can render without error.
            criterion['median_score'] = median_scores.get(criterion_name, '')
            criterion['total_value'] = max_scores.get(criterion_name, '')

        return {
            'criteria': criteria,
            'additional_feedback': self._additional_feedback(
                staff_assessment=staff_assessment,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
            ),
        }
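The has_feedback helper in Example #19 recurses into 'individual_assessments' so that feedback buried inside a grouped assessment still counts. A standalone copy of the helper with illustrative input:

    def has_feedback(assessments):
        """Return True if at least one assessment (or nested assessment) has feedback."""
        return any(
            (
                assessment and
                (assessment.get('feedback', None) or has_feedback(assessment.get('individual_assessments', [])))
            )
            for assessment in assessments
        )

    assessments = [
        {'feedback': ''},                                                         # no feedback
        {'feedback': '', 'individual_assessments': [{'feedback': 'Nice work'}]},  # nested feedback
    ]
    print(has_feedback(assessments))   # True, via the nested assessment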
Example #20
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = peer_api.get_assessments(submission_uuid)
            has_submitted_feedback = feedback is not None
        else:
            feedback = None
            peer_assessments = []
            has_submitted_feedback = False

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        # Initialize so the guard below does not raise UnboundLocalError when
        # neither a peer nor a self assessment step is configured.
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(
                submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(
                submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
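Example #20, like the earlier grade-complete variant, picks a single source for median scores in priority order: peer scores if a peer step exists, otherwise self-assessment scores, otherwise nothing. A standalone sketch of that dispatch with stand-in score lookups (the real versions call peer_api and self_api):

    def peer_median_scores(submission_uuid):
        return {'ideas': 4}   # stand-in for peer_api.get_assessment_median_scores

    def self_scores_by_criteria(submission_uuid):
        return {'ideas': 3}   # stand-in for self_api.get_assessment_scores_by_criteria

    assessment_steps = ['self-assessment']   # try adding 'peer-assessment'
    submission_uuid = 'example-uuid'

    median_scores = None
    if 'peer-assessment' in assessment_steps:
        median_scores = peer_median_scores(submission_uuid)
    elif 'self-assessment' in assessment_steps:
        median_scores = self_scores_by_criteria(submission_uuid)

    print(median_scores)   # {'ideas': 3}; stays None when neither step is configured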