Example #1
    def test_create_assessment_no_submission(self):
        # Attempt to create a self-assessment for a submission that doesn't exist
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                'invalid_submission_uuid', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                self.OPTIONS_SELECTED, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1)
            )
Example #2
    def test_create_assessment_wrong_user(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Attempt to create a self-assessment for the submission from a different user
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'another user',
                self.OPTIONS_SELECTED, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1)
            )
Example #3
    def test_create_assessment_missing_criterion(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Delete the selected option for one of the rubric's criteria
        options = copy.deepcopy(self.OPTIONS_SELECTED)
        del options['clarity']

        # Attempt to create a self-assessment with options that do not match the rubric
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                options, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1)
            )
Example #4
    def test_create_assessment_invalid_option(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Mutate the selected option so the value does not match an available option
        options = copy.deepcopy(self.OPTIONS_SELECTED)
        options['clarity'] = 'invalid option'

        # Attempt to create a self-assessment with options that do not match the rubric
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                options, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1)
            )
Example #5
    def test_create_assessment(self):
        # Initially, there should be no submission or self assessment
        self.assertEqual(get_assessment("5"), None)

        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Now there should be a submission, but no self-assessment
        assessment = get_assessment(submission["uuid"])
        self.assertIs(assessment, None)
        self.assertFalse(is_complete(submission['uuid']))

        # Create a self-assessment for the submission
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, self.RUBRIC,
            scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
        )

        # Self-assessment should be complete
        self.assertTrue(is_complete(submission['uuid']))

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Check that the assessment we created matches the assessment we retrieved
        # and that both have the correct values
        self.assertItemsEqual(assessment, retrieved)
        self.assertEqual(assessment['submission_uuid'], submission['uuid'])
        self.assertEqual(assessment['points_earned'], 8)
        self.assertEqual(assessment['points_possible'], 10)
        self.assertEqual(assessment['feedback'], u'')
        self.assertEqual(assessment['score_type'], u'SE')
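For context, a plausible shape for the OPTIONS_SELECTED and RUBRIC class fixtures these tests rely on. This is a sketch only: the 'clarity' criterion and the 8-of-10 score are implied by the tests above, but the second criterion and the exact option names and point values are assumptions.

    RUBRIC = {
        'criteria': [
            {
                'name': 'clarity',
                'prompt': 'How clear is the answer?',
                'options': [
                    {'name': 'somewhat clear', 'points': 3, 'explanation': ''},
                    {'name': 'very clear', 'points': 5, 'explanation': ''},
                ]
            },
            {
                'name': 'form',
                'prompt': 'How well-written is the answer?',
                'options': [
                    {'name': 'fair', 'points': 3, 'explanation': ''},
                    {'name': 'good', 'points': 5, 'explanation': ''},
                ]
            },
        ]
    }

    # Maps criterion name -> name of the selected option
    # ('very clear' + 'fair' = 5 + 3 = 8 points earned out of 10 possible)
    OPTIONS_SELECTED = {
        'clarity': 'very clear',
        'form': 'fair',
    }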
Example #6
    def test_open_completed_self_assessment(self, xblock):
        # Simulate the workflow being in the self-assessment step
        # after a self-assessment has been created
        submission = xblock.create_submission(
            xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
        )
        self_api.create_assessment(
            submission['uuid'],
            xblock.get_student_item_dict()['student_id'],
            {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
            {'criteria': xblock.rubric_criteria}
        )
        self._assert_path_and_context(
            xblock, 'openassessmentblock/self/oa_self_complete.html', {},
            workflow_status='self',
            submission_uuid=submission['uuid']
        )
Example #7
    def self_assess(self, data, suffix=''):
        """
        Create a self-assessment for a submission.

        Args:
            data (dict): Must have the following keys:
                submission_uuid (string): The unique identifier of the submission being assessed.
                options_selected (dict): Dictionary mapping criterion names to option values.

        Returns:
            Dict with keys "success" (bool) indicating success/failure
            and "msg" (unicode) containing additional information if an error occurs.
        """
        if 'submission_uuid' not in data:
            return {'success': False, 'msg': _(u"Missing submission_uuid key in request")}
        if 'options_selected' not in data:
            return {'success': False, 'msg': _(u"Missing options_selected key in request")}

        try:
            assessment = self_api.create_assessment(
                data['submission_uuid'],
                self.get_student_item_dict()['student_id'],
                data['options_selected'],
                {"criteria": self.rubric_criteria}
            )
            self.runtime.publish(
                self,
                "openassessmentblock.self_assess",
                {
                    "feedback": assessment["feedback"],
                    "rubric": {
                        "content_hash": assessment["rubric"]["content_hash"],
                    },
                    "scorer_id": assessment["scorer_id"],
                    "score_type": assessment["score_type"],
                    "scored_at": assessment["scored_at"],
                    "submission_uuid": assessment["submission_uuid"],
                    "parts": [
                        {
                            "option": {
                                "name": part["option"]["name"],
                                "points": part["option"]["points"]
                            }
                        }
                        for part in assessment["parts"]
                    ]
                }
            )
            # After we've created the self-assessment, we need to update the workflow.
            self.update_workflow_status()
        except self_api.SelfAssessmentRequestError as ex:
            msg = _(u"Could not create self assessment: {error}").format(error=ex.message)
            return {'success': False, 'msg': msg}
        except workflow_api.AssessmentWorkflowError as ex:
            msg = _(u"Could not update workflow: {error}").format(error=ex.message)
            return {'success': False, 'msg': msg}
        else:
            return {'success': True, 'msg': u""}
Example #8
    def self_assess(self, data, suffix=''):
        """
        Create a self-assessment for a submission.

        Args:
            data (dict): Must have the following keys:
                options_selected (dict): Dictionary mapping criterion names to option values.

        Returns:
            Dict with keys "success" (bool) indicating success/failure
            and "msg" (unicode) containing additional information if an error occurs.
        """
        if 'options_selected' not in data:
            return {
                'success': False,
                'msg': _(u"Missing options_selected key in request")
            }

        try:
            assessment = self_api.create_assessment(
                self.submission_uuid,
                self.get_student_item_dict()['student_id'],
                data['options_selected'],
                {"criteria": self.rubric_criteria}
            )
            self.runtime.publish(
                self,
                "openassessmentblock.self_assess",
                {
                    "feedback": assessment["feedback"],
                    "rubric": {
                        "content_hash": assessment["rubric"]["content_hash"],
                    },
                    "scorer_id": assessment["scorer_id"],
                    "score_type": assessment["score_type"],
                    "scored_at": assessment["scored_at"],
                    "submission_uuid": assessment["submission_uuid"],
                    "parts": [
                        {
                            "option": {
                                "name": part["option"]["name"],
                                "points": part["option"]["points"]
                            }
                        }
                        for part in assessment["parts"]
                    ]
                }
            )
            # After we've created the self-assessment, we need to update the workflow.
            self.update_workflow_status()
        except self_api.SelfAssessmentRequestError as ex:
            msg = _(u"Could not create self assessment: {error}").format(
                error=ex.message)
            return {'success': False, 'msg': msg}
        except workflow_api.AssessmentWorkflowError as ex:
            msg = _(u"Could not update workflow: {error}").format(
                error=ex.message)
            return {'success': False, 'msg': msg}
        else:
            return {'success': True, 'msg': u""}
Example #9
    def test_create_multiple_self_assessments(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Self assess once
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, self.RUBRIC,
        )

        # Attempt to self-assess again, which should raise an exception
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                self.OPTIONS_SELECTED, self.RUBRIC,
            )

        # Expect that we still have the original assessment
        retrieved = get_assessment(submission["uuid"])
        self.assertItemsEqual(assessment, retrieved)
Example #10
    def test_completed_and_past_due(self, xblock):
        # Simulate having completed self assessment
        # Even though the problem is closed, we should still see
        # that we completed the step.
        submission = xblock.create_submission(
            xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
        )
        self_api.create_assessment(
            submission['uuid'],
            xblock.get_student_item_dict()['student_id'],
            {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
            {'criteria': xblock.rubric_criteria}
        )

        # This case probably isn't possible, because presumably when we create
        # the self-assessment, the workflow status will be "waiting" or "done".
        # We're checking it anyway to be overly defensive: if the user has made
        # a self-assessment, we ALWAYS show complete, even if the workflow
        # tells us the status is still 'self'.
        self._assert_path_and_context(
            xblock, 'openassessmentblock/self/oa_self_complete.html',
            {'self_due': datetime.datetime(2000, 1, 1).replace(tzinfo=pytz.utc)},
            workflow_status='self',
            submission_uuid=submission['uuid']
        )
Example #11
    def test_create_assessment_timestamp(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Record the current system clock time
        before = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)

        # Create a self-assessment for the submission
        # Do not override the scored_at timestamp, so it should be set to the current time
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, self.RUBRIC,
        )

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Expect that both the created and retrieved assessments have the same
        # timestamp, and it's >= our recorded time.
        self.assertEqual(assessment['scored_at'], retrieved['scored_at'])
        self.assertGreaterEqual(assessment['scored_at'], before)
Example #12
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
        """
        if len(args) < 3:
            raise CommandError('Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>')

        course_id = unicode(args[0])
        item_id = unicode(args[1])

        try:
            num_submissions = int(args[2])
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        print u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id
        )

        for sub_num in range(num_submissions):

            print "Creating submission {num}".format(num=sub_num)

            # Create a dummy submission
            student_item = {
                'student_id': uuid4().hex[0:10],
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment'
            }
            submission_uuid = self._create_dummy_submission(student_item)
            self._student_items.append(student_item)

            # Create a dummy rubric
            rubric, options_selected = self._dummy_rubric()

            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):
                print "-- Creating peer-assessment {num}".format(num=num)

                scorer_id = 'test_{num}'.format(num=num)

                # The scorer needs to make a submission before assessing
                scorer_student_item = copy.copy(student_item)
                scorer_student_item['student_id'] = scorer_id
                self._create_dummy_submission(scorer_student_item)

                # Retrieve the submission we want to score
                # Note that we are NOT using the priority queue here, since we know
                # exactly which submission we want to score.
                peer_api.create_peer_workflow_item(scorer_id, submission_uuid)

                # Create the peer assessment
                assessment = {
                    'options_selected': options_selected,
                    'feedback': "  ".join(loremipsum.get_paragraphs(2))
                }
                peer_api.create_assessment(
                    submission_uuid,
                    scorer_id,
                    assessment,
                    rubric,
                    self.NUM_PEER_ASSESSMENTS
                )

            # Create a self-assessment
            print "-- Creating self assessment"
            self_api.create_assessment(
                submission_uuid, student_item['student_id'],
                options_selected, rubric
            )
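Assuming this is wired up as a standard Django management command named create_oa_submissions, it could be exercised from a test or shell roughly like this (the course and item IDs are placeholders):

    from django.core.management import call_command

    # Create 10 dummy submissions, each with peer and self assessments
    call_command(
        'create_oa_submissions',
        'edX/Demo/2014', 'i4x://edX/Demo/openassessment/abc123', '10'
    )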
Example #13
    def _create_submission_and_assessments(
        self, xblock, submission_text, peers, peer_assessments, self_assessment,
        waiting_for_peer=False
    ):
        """
        Create a submission and peer/self assessments, so that the user can receive a grade.

        Args:
            xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
            submission_text (unicode): Text of the submission from the user.
            peers (list of unicode): List of user IDs of peers who will assess the user.
            peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
            self_assessment (dict): Dict of assessment for self-assessment.

        Kwargs:
            waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.

        Returns:
            None

        """
        # Create a submission from the user
        student_item = xblock.get_student_item_dict()
        student_id = student_item['student_id']
        submission = xblock.create_submission(student_item, submission_text)

        # Create submissions and assessments from other users
        scorer_submissions = []
        for scorer_name, assessment in zip(peers, peer_assessments):

            # Create a submission for each scorer for the same problem
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = scorer_name

            scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
            workflow_api.create_workflow(scorer_sub['uuid'])

            submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))

            # Store the scorer's submission so our user can assess it later
            scorer_submissions.append(scorer_sub)

            # Create an assessment of the user's submission
            if not waiting_for_peer:
                peer_api.create_assessment(
                    scorer_sub['uuid'], scorer_name,
                    assessment['options_selected'],
                    assessment['criterion_feedback'],
                    assessment['overall_feedback'],
                    {'criteria': xblock.rubric_criteria},
                    xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
                )

        # Have our user make assessments (so she can get a score)
        for asmnt in peer_assessments:
            peer_api.get_submission_to_assess(submission['uuid'], len(peers))
            peer_api.create_assessment(
                submission['uuid'],
                student_id,
                asmnt['options_selected'],
                asmnt['criterion_feedback'],
                asmnt['overall_feedback'],
                {'criteria': xblock.rubric_criteria},
                xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
            )

        # Have the user submit a self-assessment (so she can get a score)
        if self_assessment is not None:
            self_api.create_assessment(
                submission['uuid'], student_id, self_assessment['options_selected'],
                {'criteria': xblock.rubric_criteria}
            )
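For illustration, a grade-related test might drive this helper roughly as follows (the peer names and rubric option names here are hypothetical):

    peer_assessment = {
        'options_selected': {u'clarity': u'very clear', u'form': u'fair'},
        'criterion_feedback': {},
        'overall_feedback': u'',
    }
    self._create_submission_and_assessments(
        xblock, u'Test answer',
        [u'peer_1', u'peer_2'],               # peers
        [peer_assessment, peer_assessment],   # one assessment per peer
        {'options_selected': {u'clarity': u'very clear', u'form': u'fair'}},
    )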
Example #14
    def _create_submission_and_assessments(self,
                                           xblock,
                                           submission_text,
                                           peers,
                                           peer_assessments,
                                           self_assessment,
                                           waiting_for_peer=False):
        """
        Create a submission and peer/self assessments, so that the user can receive a grade.

        Args:
            xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
            submission_text (unicode): Text of the submission from the user.
            peers (list of unicode): List of user IDs of peers who will assess the user.
            peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
            self_assessment (dict): Dict of assessment for self-assessment.

        Kwargs:
            waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.

        Returns:
            None

        """
        # Create a submission from the user
        student_item = xblock.get_student_item_dict()
        student_id = student_item['student_id']
        submission = xblock.create_submission(student_item, submission_text)

        # Create submissions and assessments from other users
        scorer_submissions = []
        for scorer_name, assessment in zip(peers, peer_assessments):

            # Create a submission for each scorer for the same problem
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = scorer_name

            scorer_sub = sub_api.create_submission(scorer,
                                                   {'text': submission_text})
            workflow_api.create_workflow(scorer_sub['uuid'])

            submission = peer_api.get_submission_to_assess(
                scorer_sub['uuid'], len(peers))

            # Store the scorer's submission so our user can assess it later
            scorer_submissions.append(scorer_sub)

            # Create an assessment of the user's submission
            if not waiting_for_peer:
                peer_api.create_assessment(
                    scorer_sub['uuid'], scorer_name,
                    assessment['options_selected'],
                    assessment['criterion_feedback'],
                    assessment['overall_feedback'],
                    {'criteria': xblock.rubric_criteria},
                    xblock.get_assessment_module(
                        'peer-assessment')['must_be_graded_by'])

        # Have our user make assessments (so she can get a score)
        for asmnt in peer_assessments:
            peer_api.get_submission_to_assess(submission['uuid'], len(peers))
            peer_api.create_assessment(
                submission['uuid'], student_id, asmnt['options_selected'],
                asmnt['criterion_feedback'], asmnt['overall_feedback'],
                {'criteria': xblock.rubric_criteria},
                xblock.get_assessment_module(
                    'peer-assessment')['must_be_graded_by'])

        # Have the user submit a self-assessment (so she can get a score)
        if self_assessment is not None:
            self_api.create_assessment(submission['uuid'], student_id,
                                       self_assessment['options_selected'],
                                       {'criteria': xblock.rubric_criteria})
Example #15
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
        """
        if len(args) < 3:
            raise CommandError(
                'Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>'
            )

        course_id = unicode(args[0])
        item_id = unicode(args[1])

        try:
            num_submissions = int(args[2])
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        print u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id)

        for sub_num in range(num_submissions):

            print "Creating submission {num}".format(num=sub_num)

            # Create a dummy submission
            student_item = {
                'student_id': uuid4().hex[0:10],
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment'
            }
            submission_uuid = self._create_dummy_submission(student_item)
            self._student_items.append(student_item)

            # Create a dummy rubric
            rubric, options_selected = self._dummy_rubric()

            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):
                print "-- Creating peer-assessment {num}".format(num=num)

                scorer_id = 'test_{num}'.format(num=num)

                # The scorer needs to make a submission before assessing
                scorer_student_item = copy.copy(student_item)
                scorer_student_item['student_id'] = scorer_id
                scorer_submission_uuid = self._create_dummy_submission(
                    scorer_student_item)

                # Retrieve the submission we want to score
                # Note that we are NOT using the priority queue here, since we know
                # exactly which submission we want to score.
                peer_api.create_peer_workflow_item(scorer_submission_uuid,
                                                   submission_uuid)

                # Create the peer assessment
                peer_api.create_assessment(
                    scorer_submission_uuid, scorer_id, options_selected, {},
                    "  ".join(loremipsum.get_paragraphs(2)), rubric,
                    self.NUM_PEER_ASSESSMENTS)

            # Create a self-assessment
            print "-- Creating self assessment"
            self_api.create_assessment(submission_uuid,
                                       student_item['student_id'],
                                       options_selected, rubric)