Code example #1
0
    def test_open_ended_flow_reset(self):
        """
        Walk through the self assessment step of the module, then issue a
        reset and verify the task counter is back at the start.
        """
        scores = [0, 1]
        problem = self.get_module_from_location(self.problem_location, COURSE)

        # Render, save a student answer, then render again.
        html = problem.handle_ajax("get_html", {})
        problem.handle_ajax(
            "save_answer",
            {"student_answer": self.answer, "can_upload_files": False, "student_file": None})
        html = problem.handle_ajax("get_html", {})

        # Submit a mocked self-assessment for the saved answer.
        assessment_data = MockQueryDict()
        assessment_data.update({'assessment': sum(scores), 'score_list[]': scores})
        problem.handle_ajax("save_assessment", assessment_data)
        first_task = json.loads(problem.task_states[0])
        self.assertEqual(json.loads(first_task['child_history'][0]['post_assessment']), scores)
        rubric = problem.handle_ajax("get_combined_rubric", {})

        # Advance to the next step of the problem.
        problem.handle_ajax("next_problem", {})
        self.assertEqual(problem.current_task_number, 0)

        html = problem.get_html()
        self.assertTrue(isinstance(html, basestring))

        rubric = problem.handle_ajax("get_combined_rubric", {})
        self.assertTrue(isinstance(rubric, basestring))
        self.assertEqual(problem.state, "assessing")
        problem.handle_ajax("reset", {})
        self.assertEqual(problem.current_task_number, 0)
Code example #2
0
    def test_reset_fail(self):
        """
        Complete the self assessment step and then attempt a reset.
        The problem allows only a single attempt, so the reset must fail.
        """
        scores = [0, 1]
        problem = self.get_module_from_location(self.problem_location, COURSE)

        # Save a student answer.
        problem.handle_ajax("save_answer", {"student_answer": self.answer})

        # Submit a mocked self-assessment for that answer.
        assessment_data = MockQueryDict()
        assessment_data.update({'assessment': sum(scores), 'score_list[]': scores})
        problem.handle_ajax("save_assessment", assessment_data)
        first_task = json.loads(problem.task_states[0])
        self.assertEqual(json.loads(first_task['child_history'][0]['post_assessment']), scores)

        # Advance to the next step of the problem.
        problem.handle_ajax("next_problem", {})
        self.assertEqual(problem.current_task_number, 0)

        html = problem.get_html()
        self.assertTrue(isinstance(html, basestring))

        # The module should now be finished.
        rubric = problem.handle_ajax("get_combined_rubric", {})
        self.assertTrue(isinstance(rubric, basestring))
        self.assertEqual(problem.state, "done")

        # The reset request must be rejected: only one attempt is allowed.
        reset_response = json.loads(problem.handle_ajax("reset", {}))
        self.assertEqual(reset_response['success'], False)
Code example #3
0
    def test_reset_fail(self):
        """
        Drive the module through the self assessment step, then try to
        reset it.  Because this problem permits only one attempt, the
        reset request should come back unsuccessful.
        """
        score_list = [0, 1]
        grading_module = self.get_module_from_location(self.problem_location, COURSE)

        # Student saves an answer.
        grading_module.handle_ajax("save_answer", {"student_answer": self.answer})

        # Student submits a mocked assessment.
        query_dict = MockQueryDict()
        query_dict.update({'assessment': sum(score_list), 'score_list[]': score_list})
        grading_module.handle_ajax("save_assessment", query_dict)
        state_json = json.loads(grading_module.task_states[0])
        post_assessment = json.loads(state_json['child_history'][0]['post_assessment'])
        self.assertEqual(post_assessment, score_list)

        # Step forward in the problem.
        grading_module.handle_ajax("next_problem", {})
        self.assertEqual(grading_module.current_task_number, 0)

        rendered = grading_module.get_html()
        self.assertTrue(isinstance(rendered, basestring))

        # At this point the module should be done.
        combined_rubric = grading_module.handle_ajax("get_combined_rubric", {})
        self.assertTrue(isinstance(combined_rubric, basestring))
        self.assertEqual(grading_module.state, "done")

        # Reset should fail because only 1 attempt is allowed.
        reset_result = json.loads(grading_module.handle_ajax("reset", {}))
        self.assertEqual(reset_result['success'], False)
Code example #4
0
    def test_open_ended_flow_reset(self):
        """
        Complete the self assessment step of the module and then reset it,
        checking that the current task number ends up back at zero.
        """
        score_list = [0, 1]
        grading_module = self.get_module_from_location(self.problem_location, COURSE)

        # Render, have the student save an answer, and render again.
        rendered = grading_module.handle_ajax("get_html", {})
        answer_payload = {
            "student_answer": self.answer,
            "can_upload_files": False,
            "student_file": None,
        }
        grading_module.handle_ajax("save_answer", answer_payload)
        rendered = grading_module.handle_ajax("get_html", {})

        # Student submits a mocked assessment.
        query_dict = MockQueryDict()
        query_dict.update({'assessment': sum(score_list), 'score_list[]': score_list})
        grading_module.handle_ajax("save_assessment", query_dict)
        state_json = json.loads(grading_module.task_states[0])
        post_assessment = json.loads(state_json['child_history'][0]['post_assessment'])
        self.assertEqual(post_assessment, score_list)
        combined_rubric = grading_module.handle_ajax("get_combined_rubric", {})

        # Step forward in the problem.
        grading_module.handle_ajax("next_problem", {})
        self.assertEqual(grading_module.current_task_number, 0)

        rendered = grading_module.get_html()
        self.assertTrue(isinstance(rendered, basestring))

        combined_rubric = grading_module.handle_ajax("get_combined_rubric", {})
        self.assertTrue(isinstance(combined_rubric, basestring))
        self.assertEqual(grading_module.state, "assessing")
        grading_module.handle_ajax("reset", {})
        self.assertEqual(grading_module.current_task_number, 0)
Code example #5
0
    def test_self_assessment_display(self):
        """
        Exercise answer storage in the self assessment module: store a
        draft answer, submit a real one, assess it, then reset and check
        which answer is displayed.
        """

        # Start from a module with no prior state at all.
        test_module = SelfAssessmentModule(get_test_system(), self.location,
                                           self.definition, self.descriptor,
                                           self.static_data)

        saved_response = "Saved response."
        submitted_response = "Submitted response."

        # With no state there is no stored answer ...
        self.assertEqual(test_module.stored_answer, None)
        # ... and the answer shown to the student is the empty string.
        self.assertEqual(test_module.get_display_answer(), "")

        # Store (but do not submit) a draft answer.
        test_module.handle_ajax("store_answer",
                                {'student_answer': saved_response},
                                get_test_system())
        # The draft is now both stored and displayed.
        self.assertEqual(test_module.stored_answer, saved_response)
        self.assertEqual(test_module.get_display_answer(), saved_response)

        # Actually submit an answer to the question.
        test_module.handle_ajax("save_answer",
                                {"student_answer": submitted_response},
                                get_test_system())
        # Submission wipes the stored draft ...
        self.assertEqual(test_module.stored_answer, None)
        # ... and records the submitted answer.
        self.assertEqual(test_module.latest_answer(), submitted_response)

        # Save a mocked assessment of the submission.
        score_list = [0]
        query_dict = MockQueryDict({
            'assessment': sum(score_list),
            'score_list[]': score_list
        })
        response = test_module.handle_ajax("save_assessment", query_dict,
                                           get_test_system())
        self.assertTrue(json.loads(response)['success'])

        # Reset so the student can retry.
        test_module.reset(get_test_system())

        # The submitted answer should come back as the display answer.
        self.assertEqual(test_module.get_display_answer(), submitted_response)
Code example #6
0
    def test_open_ended_flow_correct(self):
        """
        Test a two step problem where the student first goes through the self assessment step, and then the
        open ended step.

        Drives the module end-to-end via handle_ajax: save an answer,
        self-assess, advance, feed in a fake grading-queue reply, skip the
        post-assessment step, fetch the combined rubric, and reset.
        @return:
        """
        assessment = [1, 1]
        # Load the module
        module = self.get_module_from_location(self.problem_location, COURSE)

        # Simulate a student saving an answer
        module.handle_ajax("save_answer", {"student_answer": self.answer})
        module.save()
        status = module.handle_ajax("get_status", {})
        module.save()
        self.assertIsInstance(status, basestring)

        # Mock a student submitting an assessment
        assessment_dict = MockQueryDict()
        assessment_dict.update({'assessment': sum(assessment), 'score_list[]': assessment})
        module.handle_ajax("save_assessment", assessment_dict)
        module.save()
        task_one_json = json.loads(module.task_states[0])
        self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)

        # Move to the next step in the problem
        try:
            module.handle_ajax("next_problem", {})
            module.save()
        except GradingServiceError:
            # This error is okay.  We don't have a grading service to connect to!
            pass
        self.assertEqual(module.current_task_number, 1)
        try:
            module.render('student_view')
        except GradingServiceError:
            # This error is okay.  We don't have a grading service to connect to!
            pass

        # Try to get the rubric from the module
        module.handle_ajax("get_combined_rubric", {})
        module.save()

        # Make a fake reply from the queue
        # (mimics the xqueue callback payload: a JSON body carrying the
        # score, per-category feedback, and the rubric XML)
        queue_reply = {
            'queuekey': "",
            'xqueue_body': json.dumps({
                'score': 0,
                'feedback': json.dumps({"spelling": "Spelling: Ok.", "grammar": "Grammar: Ok.",
                                        "markup-text": " all of us can think of a book that we hope none of our children or any other children have taken off the shelf . but if i have the right to remove that book from the shelf that work i abhor then you also have exactly the same right and so does everyone else . and then we <bg>have no books left</bg> on the shelf for any of us . <bs>katherine</bs> <bs>paterson</bs> , author write a persuasive essay to a newspaper reflecting your vies on censorship <bg>in libraries . do</bg> you believe that certain materials , such as books , music , movies , magazines , <bg>etc . , should be</bg> removed from the shelves if they are found <bg>offensive ? support your</bg> position with convincing arguments from your own experience , observations <bg>, and or reading .</bg> "}),
                'grader_type': "ML",
                'success': True,
                'grader_id': 1,
                'submission_id': 1,
                'rubric_xml': "<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>",
                'rubric_scores_complete': True,
            })
        }

        module.handle_ajax("check_for_score", {})
        module.save()

        # Update the module with the fake queue reply
        module.handle_ajax("score_update", queue_reply)
        module.save()
        self.assertFalse(module.ready_to_reset)
        self.assertEqual(module.current_task_number, 1)

        # Get html and other data client will request
        module.render('student_view')

        module.handle_ajax("skip_post_assessment", {})
        module.save()

        # Get all results
        module.handle_ajax("get_combined_rubric", {})
        module.save()

        # reset the problem
        module.handle_ajax("reset", {})
        module.save()
        self.assertEqual(module.state, "initial")
Code example #7
0
    def test_open_ended_flow_correct(self):
        """
        Test a two step problem where the student first goes through the self assessment step, and then the
        open ended step.

        Drives the module end-to-end via handle_ajax: save an answer,
        self-assess, advance, feed in a fake grading-queue reply, skip the
        post-assessment step, fetch the combined rubric, and reset.
        @return:
        """
        assessment = [1, 1]
        # Load the module
        module = self.get_module_from_location(self.problem_location, COURSE)

        # Simulate a student saving an answer
        module.handle_ajax("save_answer", {"student_answer": self.answer})
        module.save()
        status = module.handle_ajax("get_status", {})
        module.save()
        self.assertIsInstance(status, basestring)

        # Mock a student submitting an assessment
        assessment_dict = MockQueryDict()
        assessment_dict.update({
            'assessment': sum(assessment),
            'score_list[]': assessment
        })
        module.handle_ajax("save_assessment", assessment_dict)
        module.save()
        task_one_json = json.loads(module.task_states[0])
        self.assertEqual(
            json.loads(task_one_json['child_history'][0]['post_assessment']),
            assessment)

        # Move to the next step in the problem
        try:
            module.handle_ajax("next_problem", {})
            module.save()
        except GradingServiceError:
            # This error is okay.  We don't have a grading service to connect to!
            pass
        self.assertEqual(module.current_task_number, 1)
        try:
            module.render('student_view')
        except GradingServiceError:
            # This error is okay.  We don't have a grading service to connect to!
            pass

        # Try to get the rubric from the module
        module.handle_ajax("get_combined_rubric", {})
        module.save()

        # Make a fake reply from the queue
        # (mimics the xqueue callback payload: a JSON body carrying the
        # score, per-category feedback, and the rubric XML)
        queue_reply = {
            'queuekey':
            "",
            'xqueue_body':
            json.dumps({
                'score':
                0,
                'feedback':
                json.dumps({
                    "spelling":
                    "Spelling: Ok.",
                    "grammar":
                    "Grammar: Ok.",
                    "markup-text":
                    " all of us can think of a book that we hope none of our children or any other children have taken off the shelf . but if i have the right to remove that book from the shelf that work i abhor then you also have exactly the same right and so does everyone else . and then we <bg>have no books left</bg> on the shelf for any of us . <bs>katherine</bs> <bs>paterson</bs> , author write a persuasive essay to a newspaper reflecting your vies on censorship <bg>in libraries . do</bg> you believe that certain materials , such as books , music , movies , magazines , <bg>etc . , should be</bg> removed from the shelves if they are found <bg>offensive ? support your</bg> position with convincing arguments from your own experience , observations <bg>, and or reading .</bg> "
                }),
                'grader_type':
                "ML",
                'success':
                True,
                'grader_id':
                1,
                'submission_id':
                1,
                'rubric_xml':
                "<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>",
                'rubric_scores_complete':
                True,
            })
        }

        module.handle_ajax("check_for_score", {})
        module.save()

        # Update the module with the fake queue reply
        module.handle_ajax("score_update", queue_reply)
        module.save()
        self.assertFalse(module.ready_to_reset)
        self.assertEqual(module.current_task_number, 1)

        # Get html and other data client will request
        module.render('student_view')

        module.handle_ajax("skip_post_assessment", {})
        module.save()

        # Get all results
        module.handle_ajax("get_combined_rubric", {})
        module.save()

        # reset the problem
        module.handle_ajax("reset", {})
        module.save()
        self.assertEqual(module.state, "initial")
Code example #8
0
class PeerGradingModuleTest(unittest.TestCase, DummyModulestore):
    """
    Test peer grading xmodule at the unit level.  More detailed tests are difficult, as the module relies on an
    external grading service.
    """
    problem_location = Location(
        ["i4x", "edX", "open_ended", "peergrading", "PeerGradingSample"])
    coe_location = Location(
        ["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion"])
    # Minimal payloads accepted by the peer grading ajax handlers.
    calibrated_dict = {'location': "blah"}
    coe_dict = {'location': coe_location.url()}
    save_dict = MockQueryDict()
    save_dict.update({
        'location': "blah",
        'submission_id': 1,
        'submission_key': "",
        'score': 1,
        'feedback': "",
        'rubric_scores[]': [0, 1],
        'submission_flagged': False,
        'answer_unknown': False,
    })

    def setUp(self):
        """
        Create a peer grading module from a test system
        @return:
        """
        self.test_system = get_test_system()
        # No grading interface: the module should fall back to mock behavior.
        self.test_system.open_ended_grading_interface = None
        self.setup_modulestore(COURSE)
        self.peer_grading = self.get_module_from_location(
            self.problem_location, COURSE)
        self.coe = self.get_module_from_location(self.coe_location, COURSE)

    def test_module_closed(self):
        """
        Test if peer grading is closed
        @return:
        """
        closed = self.peer_grading.closed()
        self.assertFalse(closed)

    def test_get_html(self):
        """
        Test to see if the module can be rendered
        @return:
        """
        _html = self.peer_grading.get_html()

    def test_get_data(self):
        """
        Try getting data from the external grading service
        @return:
        """
        success, _data = self.peer_grading.query_data_for_location(
            self.problem_location.url())
        self.assertTrue(success)

    def test_get_score(self):
        """
        Test getting the score
        @return:
        """
        score = self.peer_grading.get_score()
        self.assertIsNone(score['score'])

    def test_get_max_score(self):
        """
        Test getting the max score
        @return:
        """
        max_score = self.peer_grading.max_score()
        # assertEquals is a deprecated alias; use the canonical spelling.
        self.assertEqual(max_score, None)

    def test_get_next_submission(self):
        """
        Test to see if we can get the next mock submission
        @return:
        """
        # NOTE: this method previously lacked the "test_" prefix, so the
        # unittest runner silently skipped it.  Renamed so it actually runs.
        success, _next_submission = self.peer_grading.get_next_submission(
            {'location': 'blah'})
        self.assertEqual(success, True)

    def test_save_grade(self):
        """
        Test if we can save the grade
        @return:
        """
        response = self.peer_grading.save_grade(self.save_dict)
        self.assertEqual(response['success'], True)

    def test_is_student_calibrated(self):
        """
        Check to see if the student has calibrated yet
        @return:
        """
        response = self.peer_grading.is_student_calibrated(
            self.calibrated_dict)
        self.assertTrue(response['success'])

    def test_show_calibration_essay(self):
        """
        Test showing the calibration essay
        @return:
        """
        response = self.peer_grading.show_calibration_essay(
            self.calibrated_dict)
        self.assertTrue(response['success'])

    def test_save_calibration_essay(self):
        """
        Test saving the calibration essay
        @return:
        """
        response = self.peer_grading.save_calibration_essay(self.save_dict)
        self.assertTrue(response['success'])

    def test_peer_grading_problem(self):
        """
        See if we can render a single problem
        @return:
        """
        response = self.peer_grading.peer_grading_problem(self.coe_dict)
        self.assertTrue(response['success'])

    def test___find_corresponding_module_for_location_exceptions(self):
        """
        Unit test for the exception cases of __find_corresponding_module_for_location
        Mainly for diff coverage
        @return:
        """
        with self.assertRaises(ItemNotFoundError):
            self.peer_grading._find_corresponding_module_for_location(
                Location('i4x', 'a', 'b', 'c', 'd'))

    def test_get_instance_state(self):
        """
        Get the instance state dict
        @return:
        """
        self.peer_grading.get_instance_state()