def generate_score(owner_submission_uuid):
    """
    Calculates the score of the submission using existing ORA2 api

    Input:
        - owner_submission_uuid
    Return:
        - Actual score to be stored in Score.points_earned
    """
    median_score_dict = get_assessment_median_scores(owner_submission_uuid)
    return sum(median_score_dict.values())
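
A minimal sketch, for context, of how a total computed this way can be persisted. store_submission_score is a hypothetical helper; the peer_api/sub_api imports and the set_score(submission_uuid, points_earned, points_possible) call mirror the usage shown in the later examples.

from openassessment.assessment.api import peer as peer_api
from submissions import api as sub_api


def store_submission_score(owner_submission_uuid):
    """Recompute and store the peer score for a single submission (sketch only)."""
    # Points earned: the sum of the per-criterion median peer scores,
    # which is exactly what generate_score() above computes.
    median_scores = peer_api.get_assessment_median_scores(owner_submission_uuid)
    points_earned = sum(median_scores.values())

    # Points possible: the sum of the per-criterion maximum rubric scores.
    max_scores = peer_api.get_rubric_max_scores(owner_submission_uuid)
    points_possible = sum(max_scores.values())

    sub_api.set_score(owner_submission_uuid, points_earned, points_possible)
    return points_earned, points_possible
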
Example #2
def print_summary(course_id, oa_item, anonymous_student_id):
    # Print submission
    submission = get_submission(course_id, oa_item.location, anonymous_student_id)
    print "Submission status:"
    print_submission(submission, oa_item)

    # Print scored assessment(s)
    scored_items = PeerWorkflowItem.objects.filter(author=submission.id, submission_uuid=submission.submission_uuid, assessment__isnull=False, scored=True).order_by('assessment')
    print "Scored assessment(s):"
    if scored_items:
        scored_assessments = [scored_item.assessment for scored_item in scored_items]
        scored_scores = scores_by_criterion(scored_assessments)
        median_score_dict = Assessment.get_median_score_dict(scored_scores)
        print_peerworkflowitem(scored_items, scored_scores)
    else:
        scored_scores = {}
        print "... No record was found."

    # Print not-scored assessment(s)
    not_scored_items = PeerWorkflowItem.objects.filter(author=submission.id, submission_uuid=submission.submission_uuid, assessment__isnull=False, scored=False).order_by('assessment')
    print "Not-scored assessment(s):"
    if not_scored_items:
        not_scored_assessments = [not_scored_item.assessment for not_scored_item in not_scored_items]
        not_scored_scores = scores_by_criterion(not_scored_assessments)
        print_peerworkflowitem(not_scored_items, not_scored_scores)
    else:
        print "... No record was found."

    # Print latest score
    latest_score = get_latest_score(submission)
    print "Latest score:"
    if latest_score is not None:
        try:
            median_scores = peer_api.get_assessment_median_scores(submission.submission_uuid)
        except Exception:
            median_scores = {}
        latest_score_output = PrettyTable(['Score ID'] + scored_scores.keys() + ['Points earned', 'Points possible', 'Created at'])
        latest_score_output.align = 'l'
        row = []
        row.append(latest_score.id)
        # Use .get() so an empty median_scores (from the except branch above) doesn't raise KeyError
        row.extend([median_scores.get(k, '') for k in scored_scores.keys()])
        row.append(latest_score.points_earned)
        row.append(latest_score.points_possible)
        row.append(latest_score.created_at)
        latest_score_output.add_row(row)
        print latest_score_output
    else:
        print "... No record was found."
Example #3
    def _peer_median_option(self, submission_uuid, criterion):
        """
        Returns the option for the median peer grade.

        Args:
            submission_uuid (str): The id for the submission.
            criterion (dict): The criterion in question.

        Returns:
            The option for the median peer grade.

        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api

        median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        median_score = median_scores.get(criterion['name'], None)

        def median_options():
            """
            Returns a list of options that should be shown to represent the median.

            Some examples:
              1. Options A=1, B=3, and C=5, a median score of 3 returns [B].
              2. Options A=1, B=3, and C=5, a median score of 4 returns [B, C].
              3. Options A=1, B=1, and C=3, a median score of 1 returns [A, B].
              4. Options A=1, B=1, C=3, and D=3, a median score of 2 returns [A, B, C, D].
              5. Options A=1, B=3, and C=5, a median score of 6 returns [C].
                 Note: 5 should not happen as a median should never be out of range.
            """
            last_score = None
            median_options = []

            # Sort the options first by name and then by points, so that if there
            # are options with identical points they will sort alphabetically rather
            # than randomly. Note that this depends upon sorted being a stable sort.
            alphabetical_options = sorted(criterion['options'], key=lambda option: option['label'])
            ordered_options = sorted(alphabetical_options, key=lambda option: option['points'])

            for option in ordered_options:
                current_score = option['points']

                # If we have reached a new score, then decide what to do next
                if current_score != last_score:

                    # If the last score we saw was already larger than the median
                    # score, then we must have collected enough so return all
                    # the median options.
                    if last_score >= median_score:
                        return median_options

                    # If the current score is exactly the median or is less,
                    # then we don't need any previously collected scores.
                    if current_score <= median_score:
                        median_options = []

                    # Update the last score to be the current one
                    last_score = current_score

                # Collect the current option in case it is applicable
                median_options.append(option)
            return median_options

        # Calculate the full list of matching options for the median, and then:
        #  - If zero or one matches are found, then just return None or the single item.
        #  - If more than one match is found, return a dict with an aggregate label,
        #    the median score, and no explanation (it is too verbose to show an aggregate).
        options = median_options()
        if len(options) == 0:
            # If we weren't able to get a median option when there should be one, show the following message
            # This happens when there are less than must_be_graded_by assessments made for the user
            if len(criterion['options']) > 0:
                return {'label': _('Waiting for peer reviews')}
            else:
                return None
        if len(options) == 1:
            return options[0]
        return {
            'label': u' / '.join([option['label'] for option in options]),
            'points': median_score,
            'explanation': None,
        }
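
The docstring cases above can be checked with a standalone sketch of the same bracketing rule (plain Python, not the ORA2 API; options_for_median is a hypothetical name, and the comparison against the initial None is made explicit).

def options_for_median(options, median_score):
    """Return the options that should be shown for median_score (illustration only)."""
    last_score = None
    selected = []
    # Sort by points, breaking ties alphabetically, as the method above does.
    for option in sorted(options, key=lambda o: (o['points'], o['label'])):
        current_score = option['points']
        if current_score != last_score:
            if last_score is not None and last_score >= median_score:
                return selected  # everything at or bracketing the median is collected
            if current_score <= median_score:
                selected = []  # the median has not been passed yet, so restart the collection
            last_score = current_score
        selected.append(option)
    return selected


a, b, c = {'label': 'A', 'points': 1}, {'label': 'B', 'points': 3}, {'label': 'C', 'points': 5}
assert [o['label'] for o in options_for_median([a, b, c], 3)] == ['B']       # docstring case 1
assert [o['label'] for o in options_for_median([a, b, c], 4)] == ['B', 'C']  # docstring case 2
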
Example #4
    def grade_details(
            self, submission_uuid, peer_assessments, self_assessment, staff_assessment,
            is_staff=False
    ):
        """
        Returns details about the grade assigned to the submission.

        Args:
            submission_uuid (str): The id of the submission being graded.
            peer_assessments (list of dict): Serialized assessment models from the peer API.
            self_assessment (dict): Serialized assessment model from the self API
            staff_assessment (dict): Serialized assessment model from the staff API
            is_staff (bool): True if the grade details are being displayed to staff, else False.
                Default value is False (meaning grade details are being shown to the learner).

        Returns:
            A dictionary with full details about the submission's grade.

        Example:
            {
                criteria: [{
                    'label': 'Test name',
                    'name': 'f78ac7d4ca1e4134b0ba4b40ca212e72',
                    'prompt': 'Test prompt',
                    'order_num': 2,
                    'options': [...]
                    'feedback': [
                        'Good job!',
                        'Excellent work!',
                    ]
                }],
                additional_feedback: [{
                }]
                ...
            }
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api

        criteria = copy.deepcopy(self.rubric_criteria_with_labels)

        def has_feedback(assessments):
            """
            Returns True if at least one assessment has feedback.

            Args:
                assessments: A list of assessments

            Returns:
                Returns True if at least one assessment has feedback.
            """
            return any(
                (
                    assessment and
                    (assessment.get('feedback', None) or has_feedback(assessment.get('individual_assessments', [])))
                )
                for assessment in assessments
            )

        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        assessment_steps = self.assessment_steps
        if staff_assessment:
            median_scores = staff_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)

        for criterion in criteria:
            criterion_name = criterion['name']

            # Record assessment info for the current criterion
            criterion['assessments'] = self._graded_assessments(
                submission_uuid, criterion,
                assessment_steps,
                staff_assessment,
                peer_assessments,
                self_assessment,
                is_staff=is_staff,
            )

            # Record whether there is any feedback provided in the assessments
            criterion['has_feedback'] = has_feedback(criterion['assessments'])

            # Although we prevent course authors from modifying criteria post-release,
            # it's still possible for assessments created by course staff to
            # have criteria that differ from the current problem definition.
            # It's also possible to circumvent the post-release restriction
            # if course authors directly import a course into Studio.
            # If this happens, we simply leave the score blank so that the grade
            # section can render without error.
            criterion['median_score'] = median_scores.get(criterion_name, '')
            criterion['total_value'] = max_scores.get(criterion_name, '')

        return {
            'criteria': criteria,
            'additional_feedback': self._additional_feedback(
                staff_assessment=staff_assessment,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
            ),
        }
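
The if/elif chain above encodes a strict priority for where the per-criterion scores come from: a staff assessment wins, then the peer medians, then the self scores. Isolated as a sketch (resolve_median_scores is a hypothetical name; the imports are the same ones used inside the method).

from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.api import self as self_api
from openassessment.assessment.api import staff as staff_api


def resolve_median_scores(submission_uuid, assessment_steps, staff_assessment):
    """Pick the per-criterion score source the same way grade_details() does."""
    if staff_assessment:
        # A staff grade overrides everything else.
        return staff_api.get_assessment_scores_by_criteria(submission_uuid)
    if "peer-assessment" in assessment_steps:
        # Otherwise fall back to the median of the peer assessments.
        return peer_api.get_assessment_median_scores(submission_uuid)
    if "self-assessment" in assessment_steps:
        # Finally, use the learner's self assessment.
        return self_api.get_assessment_scores_by_criteria(submission_uuid)
    return None
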
Example #5
    def _peer_median_option(self, submission_uuid, criterion):
        """
        Returns the option for the median peer grade.

        Args:
            submission_uuid (str): The id for the submission.
            criterion (dict): The criterion in question.

        Returns:
            The option for the median peer grade.

        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api

        median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        median_score = median_scores.get(criterion['name'], None)

        def median_options():
            """
            Returns a list of options that should be shown to represent the median.

            Some examples:
              1. Options A=1, B=3, and C=5, a median score of 3 returns [B].
              2. Options A=1, B=3, and C=5, a median score of 4 returns [B, C].
              3. Options A=1, B=1, and C=3, a median score of 1 returns [A, B].
              4. Options A=1, B=1, C=3, and D=3, a median score of 2 returns [A, B, C, D].
              5. Options A=1, B=3, and C=5, a median score of 6 returns [C].
                 Note: 5 should not happen as a median should never be out of range.
            """
            last_score = None
            median_options = []

            # Sort the options first by name and then by points, so that if there
            # are options with identical points they will sort alphabetically rather
            # than randomly. Note that this depends upon sorted being a stable sort.
            alphabetical_options = sorted(criterion['options'],
                                          key=lambda option: option['label'])
            ordered_options = sorted(alphabetical_options,
                                     key=lambda option: option['points'])

            for option in ordered_options:
                current_score = option['points']

                # If we have reached a new score, then decide what to do next
                if current_score != last_score:

                    # If the last score we saw was already larger than the median
                    # score, then we must have collected enough so return all
                    # the median options.
                    if last_score >= median_score:
                        return median_options

                    # If the current score is exactly the median or is less,
                    # then we don't need any previously collected scores.
                    if current_score <= median_score:
                        median_options = []

                    # Update the last score to be the current one
                    last_score = current_score

                # Collect the current option in case it is applicable
                median_options.append(option)
            return median_options

        # Calculate the full list of matching options for the median, and then:
        #  - If zero or one matches are found, then just return None or the single item.
        #  - If more than one match is found, return a dict with an aggregate label,
        #    the median score, and no explanation (it is too verbose to show an aggregate).
        options = median_options()
        if len(options) == 0:
            # If we weren't able to get a median option when there should be one, show the following message
            # This happens when there are less than must_be_graded_by assessments made for the user
            if len(criterion['options']) > 0:
                return {'label': _('Waiting for peer reviews')}
            else:
                return None
        if len(options) == 1:
            return options[0]
        return {
            'label': u' / '.join([option['label'] for option in options]),
            'points': median_score,
            'explanation': None,
        }
Example #6
    def grade_details(self,
                      submission_uuid,
                      peer_assessments,
                      self_assessment,
                      staff_assessment,
                      is_staff=False):
        """
        Returns details about the grade assigned to the submission.

        Args:
            submission_uuid (str): The id of the submission being graded.
            peer_assessments (list of dict): Serialized assessment models from the peer API.
            self_assessment (dict): Serialized assessment model from the self API
            staff_assessment (dict): Serialized assessment model from the staff API
            is_staff (bool): True if the grade details are being displayed to staff, else False.
                Default value is False (meaning grade details are being shown to the learner).

        Returns:
            A dictionary with full details about the submission's grade.

        Example:
            {
                criteria: [{
                    'label': 'Test name',
                    'name': 'f78ac7d4ca1e4134b0ba4b40ca212e72',
                    'prompt': 'Test prompt',
                    'order_num': 2,
                    'options': [...]
                    'feedback': [
                        'Good job!',
                        'Excellent work!',
                    ]
                }],
                additional_feedback: [{
                }]
                ...
            }
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api

        criteria = copy.deepcopy(self.rubric_criteria_with_labels)

        def has_feedback(assessments):
            """
            Returns True if at least one assessment has feedback.

            Args:
                assessments: A list of assessments

            Returns:
                Returns True if at least one assessment has feedback.
            """
            return any((assessment and (
                assessment.get('feedback', None)
                or has_feedback(assessment.get('individual_assessments', []))))
                       for assessment in assessments)

        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        assessment_steps = self.assessment_steps
        if staff_assessment:
            median_scores = staff_api.get_assessment_scores_by_criteria(
                submission_uuid)
        elif "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(
                submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(
                submission_uuid)

        for criterion in criteria:
            criterion_name = criterion['name']

            # Record assessment info for the current criterion
            criterion['assessments'] = self._graded_assessments(
                submission_uuid,
                criterion,
                assessment_steps,
                staff_assessment,
                peer_assessments,
                self_assessment,
                is_staff=is_staff,
            )

            # Record whether there is any feedback provided in the assessments
            criterion['has_feedback'] = has_feedback(criterion['assessments'])

            # Although we prevent course authors from modifying criteria post-release,
            # it's still possible for assessments created by course staff to
            # have criteria that differ from the current problem definition.
            # It's also possible to circumvent the post-release restriction
            # if course authors directly import a course into Studio.
            # If this happens, we simply leave the score blank so that the grade
            # section can render without error.
            criterion['median_score'] = median_scores.get(criterion_name, '')
            criterion['total_value'] = max_scores.get(criterion_name, '')

        return {
            'criteria': criteria,
            'additional_feedback': self._additional_feedback(
                staff_assessment=staff_assessment,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
            ),
        }
Example #7
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid))

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid))

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(
                submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(
                submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(
                submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(
                    criterion["name"], '')
                criterion["total_value"] = max_scores.get(
                    criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
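
The final loop above is a small annotation step: the displayed rubric criteria (a copy, not the XBlock field itself) each get a median and a maximum, with '' as the fallback when an assessed criterion no longer matches the current problem definition. A standalone sketch (annotate_criteria is a hypothetical name):

def annotate_criteria(rubric_criteria, median_scores, max_scores):
    """Attach display scores to each criterion, tolerating mismatched rubrics."""
    for criterion in rubric_criteria:
        criterion["median_score"] = median_scores.get(criterion["name"], '')
        criterion["total_value"] = max_scores.get(criterion["name"], '')
    return rubric_criteria


criteria = [{"name": "clarity"}, {"name": "accuracy"}]
annotate_criteria(criteria, median_scores={"clarity": 3}, max_scores={"clarity": 5, "accuracy": 4})
# "clarity" renders as 3 out of 5; "accuracy" gets a blank median but keeps its maximum of 4.
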
Example #8
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid)
            )

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'allow_latex': self.allow_latex,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(criterion["name"], '')
                criterion["total_value"] = max_scores.get(criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #9
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = peer_api.get_assessments(submission_uuid)
            has_submitted_feedback = feedback is not None
        else:
            feedback = None
            peer_assessments = []
            has_submitted_feedback = False

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None  # avoid a NameError if neither step below applies
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #10
    def test_median_score_db_error(self, mock_filter):
        mock_filter.side_effect = DatabaseError("Bad things happened")
        tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
        peer_api.get_assessment_median_scores(tim["uuid"])
Example #11
    def handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError("This command requires two arguments: <course_id> <username>")

        course_id, username, = args
        # Check args: course_id
        try:
            course_id = CourseLocator.from_string(course_id)
        except InvalidKeyError:
            raise CommandError("The course_id is not of the right format. It should be like 'org/course/run' or 'course-v1:org+course+run'")

        # Find course
        course_items = modulestore().get_items(course_id, qualifiers={'category': 'course'})
        if not course_items:
            raise CommandError("No such course was found.")

        # Find openassessment items
        oa_items = modulestore().get_items(course_id, qualifiers={'category': 'openassessment'})
        if not oa_items:
            raise CommandError("No openassessment item was found.")
        oa_items = sorted(oa_items, key=lambda item:item.start or datetime(2030, 1, 1, tzinfo=UTC()))
        print "Openassessment item(s):"
        oa_output = PrettyTable(['#', 'Item ID', 'Title'])
        oa_output.align = 'l'
        for i, oa_item in enumerate(oa_items):
            row = []
            row.append(i)
            row.append(oa_item.location)
            row.append(oa_item.title)
            oa_output.add_row(row)
        print oa_output
        while True:
            try:
                selected = raw_input("Choose an openassessment item # (empty to cancel): ")
                if selected == '':
                    print "Cancelled."
                    return
                selected = int(selected)
                oa_item = oa_items[selected]
                break
            except (IndexError, ValueError):
                print "WARN: Invalid number was detected. Choose again."
                continue

        item_location = oa_item.location

        # Get student_id from username
        # TODO: courseenrollment parameters can be used by only lms?
        students = User.objects.filter(username=username, is_active=True, courseenrollment__course_id=course_id, courseenrollment__is_active=True)
        if not students:
            raise CommandError("No such user was found.")
        student = students[0]
        anonymous_student_id = anonymous_id_for_user(student, course_id)

        # Get submission from student_id, course_id and item_location
        submission = get_submission(course_id, item_location, anonymous_student_id)

        # Print summary
        print_summary(course_id, oa_item, anonymous_student_id)

        while True:
            print "[0] Show the user's submission again."
            print "[1] Toggle the `scored` flag in the peer-assessment record."
            print "[2] Create a new peer-assessment record to the users."
            resp = raw_input("Choose an operation (empty to cancel): ")

            if resp == '0':
                print_summary(course_id, oa_item, anonymous_student_id)

            elif resp == '1':
                while True:
                    try:
                        selected_item_id = raw_input("Please input PeerWorkflowItem ID to toggle the `scored` flag (empty to cancel): ")
                        if selected_item_id == '':
                            print "Cancelled."
                            break
                        selected_item_id = int(selected_item_id)
                        selected_item = PeerWorkflowItem.objects.filter(id=selected_item_id, author=submission.id, submission_uuid=submission.submission_uuid, assessment__isnull=False)[0]
                    except (IndexError, ValueError):
                        print "WARN: Invalid ID was detected. Input again."
                        continue
                    # Update PeerWorkflowItem (assessment_peerworkflowitem record)
                    selected_item.scored = not selected_item.scored
                    selected_item.save()
                    # Update Score (submissions_score record)
                    latest_score = get_latest_score(submission)
                    if latest_score is not None:
                        max_scores = peer_api.get_rubric_max_scores(submission.submission_uuid)
                        try:
                            median_scores = peer_api.get_assessment_median_scores(submission.submission_uuid)
                        except Exception:
                            median_scores = {}
                        sub_api.set_score(submission.submission_uuid, sum(median_scores.values()), sum(max_scores.values()))
                        #latest_score.points_earned = sum(median_scores.values())
                        #latest_score.created_at = now()
                        #latest_score.save()
                    # Update status of AssessmentWorkflow (workflow_assessmentworkflow record)
                    get_workflow_info(submission.submission_uuid, oa_item)

                    # Print summary
                    print_summary(course_id, oa_item, anonymous_student_id)

            elif resp == '2':
                while True:
                    staff_username = raw_input("Please input username to be given a new peer-assessment item (empty to cancel): ")
                    if staff_username == '':
                        print "Cancelled."
                        break
                    # TODO: courseenrollment parameters can be used by only lms?
                    staffs = User.objects.filter(username=staff_username, is_active=True, courseenrollment__course_id=course_id, courseenrollment__is_active=True)
                    if not staffs:
                        print "WARN: No such user was found in the course. Input again."
                        continue
                    staff = staffs[0]
                    anonymous_staff_id = anonymous_id_for_user(staff, course_id)
                    staff_submissions = PeerWorkflow.objects.filter(course_id=course_id, item_id=item_location, student_id=anonymous_staff_id)
                    if not staff_submissions:
                        print "WARN: This user hasn't posted any submission in this openassessment item yet. Input again."
                        continue
                    staff_submission = staff_submissions[0]
                    # Check if this user has already assessed the requested submission
                    items_assessed_by_staff = PeerWorkflowItem.objects.filter(
                        scorer=staff_submission,
                        author=submission,
                        submission_uuid=submission.submission_uuid
                    )
                    if len(items_assessed_by_staff) > 0:
                        print "WARN: This user has already assessed the requested submission. Input again."
                        continue
                    print "Staff submission:"
                    print_submission(staff_submission, oa_item)

                    while True:
                        resp = raw_input("Is this right? (y/n): ")
                        if resp.lower() == 'y':
                            new_items = PeerWorkflowItem.objects.filter(scorer_id=staff_submission.id, assessment__isnull=True).order_by('-started_at')
                            if new_items:
                                # Replace the author and submission_uuid
                                new_item = new_items[0]
                                new_item.author = submission
                                new_item.submission_uuid = submission.submission_uuid
                                new_item.started_at = now()
                            else:
                                new_item = PeerWorkflowItem.objects.create(
                                    scorer=staff_submission,
                                    author=submission,
                                    submission_uuid=submission.submission_uuid,
                                    started_at=now()
                                )
                            new_item.save()
                            print "Create a new peer-assessment record to %s successfully!" % staff.username
                            break
                        elif resp.lower() == 'n':
                            break
                        else:
                            continue

            elif resp == '':
                print "Cancelled."
                break
            else:
                print "WARN: Invalid number was detected. Choose again."
                continue
Example #12
    def handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError(
                "This command requires two arguments: <course_id> <username>")

        course_id, username, = args
        # Check args: course_id
        try:
            course_id = CourseLocator.from_string(course_id)
        except InvalidKeyError:
            raise CommandError(
                "The course_id is not of the right format. It should be like 'org/course/run' or 'course-v1:org+course+run'"
            )

        # Find course
        course_items = modulestore().get_items(
            course_id, qualifiers={'category': 'course'})
        if not course_items:
            raise CommandError("No such course was found.")

        # Find openassessment items
        oa_items = modulestore().get_items(
            course_id, qualifiers={'category': 'openassessment'})
        if not oa_items:
            raise CommandError("No openassessment item was found.")
        oa_items = sorted(
            oa_items,
            key=lambda item: item.start or datetime(2030, 1, 1, tzinfo=UTC()))
        print "Openassessment item(s):"
        oa_output = PrettyTable(['#', 'Item ID', 'Title'])
        oa_output.align = 'l'
        for i, oa_item in enumerate(oa_items):
            row = []
            row.append(i)
            row.append(oa_item.location)
            row.append(oa_item.title)
            oa_output.add_row(row)
        print oa_output
        while True:
            try:
                selected = raw_input(
                    "Choose an openassessment item # (empty to cancel): ")
                if selected == '':
                    print "Cancelled."
                    return
                selected = int(selected)
                oa_item = oa_items[selected]
                break
            except (IndexError, ValueError):
                print "WARN: Invalid number was detected. Choose again."
                continue

        item_location = oa_item.location

        # Get student_id from username
        # TODO: courseenrollment parameters can be used by only lms?
        students = User.objects.filter(username=username,
                                       is_active=True,
                                       courseenrollment__course_id=course_id,
                                       courseenrollment__is_active=True)
        if not students:
            raise CommandError("No such user was found.")
        student = students[0]
        anonymous_student_id = anonymous_id_for_user(student, course_id)

        # Get submission from student_id, course_id and item_location
        submission = get_submission(course_id, item_location,
                                    anonymous_student_id)

        # Print summary
        print_summary(course_id, oa_item, anonymous_student_id)

        while True:
            print "[0] Show the user's submission again."
            print "[1] Toggle the `scored` flag in the peer-assessment record."
            print "[2] Create a new peer-assessment record to the users."
            resp = raw_input("Choose an operation (empty to cancel): ")

            if resp == '0':
                print_summary(course_id, oa_item, anonymous_student_id)

            elif resp == '1':
                while True:
                    try:
                        selected_item_id = raw_input(
                            "Please input PeerWorkflowItem ID to toggle the `scored` flag (empty to cancel): "
                        )
                        if selected_item_id == '':
                            print "Cancelled."
                            break
                        selected_item_id = int(selected_item_id)
                        selected_item = PeerWorkflowItem.objects.filter(
                            id=selected_item_id,
                            author=submission.id,
                            submission_uuid=submission.submission_uuid,
                            assessment__isnull=False)[0]
                    except (IndexError, ValueError):
                        print "WARN: Invalid ID was detected. Input again."
                        continue
                    # Update PeerWorkflowItem (assessment_peerworkflowitem record)
                    selected_item.scored = not selected_item.scored
                    selected_item.save()
                    # Update Score (submissions_score record)
                    latest_score = get_latest_score(submission)
                    if latest_score is not None:
                        max_scores = peer_api.get_rubric_max_scores(
                            submission.submission_uuid)
                        try:
                            median_scores = peer_api.get_assessment_median_scores(
                                submission.submission_uuid)
                        except Exception:
                            median_scores = {}
                        sub_api.set_score(submission.submission_uuid,
                                          sum(median_scores.values()),
                                          sum(max_scores.values()))
                        #latest_score.points_earned = sum(median_scores.values())
                        #latest_score.created_at = now()
                        #latest_score.save()
                    # Update status of AssessmentWorkflow (workflow_assessmentworkflow record)
                    get_workflow_info(submission.submission_uuid, oa_item)

                    # Print summary
                    print_summary(course_id, oa_item, anonymous_student_id)

            elif resp == '2':
                while True:
                    staff_username = raw_input(
                        "Please input username to be given a new peer-assessment item (empty to cancel): "
                    )
                    if staff_username == '':
                        print "Cancelled."
                        break
                    # TODO: courseenrollment parameters can be used by only lms?
                    staffs = User.objects.filter(
                        username=staff_username,
                        is_active=True,
                        courseenrollment__course_id=course_id,
                        courseenrollment__is_active=True)
                    if not staffs:
                        print "WARN: No such user was found in the course. Input again."
                        continue
                    staff = staffs[0]
                    anonymous_staff_id = anonymous_id_for_user(
                        staff, course_id)
                    staff_submissions = PeerWorkflow.objects.filter(
                        course_id=course_id,
                        item_id=item_location,
                        student_id=anonymous_staff_id)
                    if not staff_submissions:
                        print "WARN: This user hasn't posted any submission in this openassessment item yet. Input again."
                        continue
                    staff_submission = staff_submissions[0]
                    # Check if this user has already assessed the requested submission
                    items_assessed_by_staff = PeerWorkflowItem.objects.filter(
                        scorer=staff_submission,
                        author=submission,
                        submission_uuid=submission.submission_uuid)
                    if len(items_assessed_by_staff) > 0:
                        print "WARN: This user has already assessed the requested submission. Input again."
                        continue
                    print "Staff submission:"
                    print_submission(staff_submission, oa_item)

                    while True:
                        resp = raw_input("Is this right? (y/n): ")
                        if resp.lower() == 'y':
                            new_items = PeerWorkflowItem.objects.filter(
                                scorer_id=staff_submission.id,
                                assessment__isnull=True).order_by(
                                    '-started_at')
                            if new_items:
                                # Replace the author and submission_uuid
                                new_item = new_items[0]
                                new_item.author = submission
                                new_item.submission_uuid = submission.submission_uuid
                                new_item.started_at = now()
                            else:
                                new_item = PeerWorkflowItem.objects.create(
                                    scorer=staff_submission,
                                    author=submission,
                                    submission_uuid=submission.submission_uuid,
                                    started_at=now())
                            new_item.save()
                            print "Create a new peer-assessment record to %s successfully!" % staff.username
                            break
                        elif resp.lower() == 'n':
                            break
                        else:
                            continue

            elif resp == '':
                print "Cancelled."
                break
            else:
                print "WARN: Invalid number was detected. Choose again."
                continue
Example #13
def print_summary(course_id, oa_item, anonymous_student_id):
    # Print submission
    submission = get_submission(course_id, oa_item.location,
                                anonymous_student_id)
    print "Submission status:"
    print_submission(submission, oa_item)

    # Print scored assessment(s)
    scored_items = PeerWorkflowItem.objects.filter(
        author=submission.id,
        submission_uuid=submission.submission_uuid,
        assessment__isnull=False,
        scored=True).order_by('assessment')
    print "Scored assessment(s):"
    if scored_items:
        scored_assessments = [
            scored_item.assessment for scored_item in scored_items
        ]
        scored_scores = scores_by_criterion(scored_assessments)
        median_score_dict = Assessment.get_median_score_dict(scored_scores)
        print_peerworkflowitem(scored_items, scored_scores)
    else:
        scored_scores = {}
        print "... No record was found."

    # Print not-scored assessment(s)
    not_scored_items = PeerWorkflowItem.objects.filter(
        author=submission.id,
        submission_uuid=submission.submission_uuid,
        assessment__isnull=False,
        scored=False).order_by('assessment')
    print "Not-scored assessment(s):"
    if not_scored_items:
        not_scored_assessments = [
            not_scored_item.assessment for not_scored_item in not_scored_items
        ]
        not_scored_scores = scores_by_criterion(not_scored_assessments)
        print_peerworkflowitem(not_scored_items, not_scored_scores)
    else:
        print "... No record was found."

    # Print latest score
    latest_score = get_latest_score(submission)
    print "Latest score:"
    if latest_score is not None:
        try:
            median_scores = peer_api.get_assessment_median_scores(
                submission.submission_uuid)
        except Exception:
            median_scores = {}
        latest_score_output = PrettyTable(
            ['Score ID'] + scored_scores.keys() +
            ['Points earned', 'Points possible', 'Created at'])
        latest_score_output.align = 'l'
        row = []
        row.append(latest_score.id)
        # Use .get() so an empty median_scores (from the except branch above) doesn't raise KeyError
        row.extend([median_scores.get(k, '') for k in scored_scores.keys()])
        row.append(latest_score.points_earned)
        row.append(latest_score.points_possible)
        row.append(latest_score.created_at)
        latest_score_output.add_row(row)
        print latest_score_output
    else:
        print "... No record was found."
Example #14
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = peer_api.get_assessments(submission_uuid)
            has_submitted_feedback = feedback is not None
        else:
            feedback = None
            peer_assessments = []
            has_submitted_feedback = False

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None  # avoid a NameError if neither step below applies
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(
                submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(
                submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)