Example #1
    def test_open_ended_flow_reset(self):
        """
        Test the flow of the module when the self assessment step is completed and the problem is then reset.
        @return:
        """
        assessment = [0, 1]
        module = self.get_module_from_location(self.problem_location, COURSE)

        #Simulate a student saving an answer
        module.handle_ajax("save_answer", {"student_answer": self.answer})
        status = module.handle_ajax("get_status", {})
        self.assertTrue(isinstance(status, basestring))

        #Mock a student submitting an assessment
        assessment_dict = MockQueryDict()
        assessment_dict.update({'assessment': sum(assessment), 'score_list[]': assessment})
        module.handle_ajax("save_assessment", assessment_dict)
        task_one_json = json.loads(module.task_states[0])
        self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
        status = module.handle_ajax("get_status", {})
        self.assertTrue(isinstance(status, basestring))

        #Move to the next step in the problem
        module.handle_ajax("next_problem", {})
        self.assertEqual(module.current_task_number, 0)

        html = module.get_html()
        self.assertTrue(isinstance(html, basestring))

        rubric = module.handle_ajax("get_combined_rubric", {})
        self.assertTrue(isinstance(rubric, basestring))
        self.assertEqual(module.state, "assessing")
        module.handle_ajax("reset", {})
        self.assertEqual(module.current_task_number, 0)
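
MockQueryDict is not defined in these excerpts; the tests only need it to behave like Django's QueryDict for list-valued keys such as 'score_list[]'. A minimal sketch of such a helper, assuming only the behaviour exercised above, could look like this:

class MockQueryDict(dict):
    """
    Hypothetical stand-in for Django's QueryDict, sufficient for these tests.

    The ajax handlers read list-valued keys (e.g. 'score_list[]') via getlist(),
    so the stub simply returns the stored value or a default.
    """
    def getlist(self, key, default=None):
        # Values are stored as plain lists, so no splitting is needed.
        if key in self:
            return self[key]
        return default if default is not None else []
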
Example #2
    def test_reset_fail(self):
        """
        Test the flow of the module when the self assessment step is completed and the problem is then reset.
        Since the problem only allows one attempt, the reset should fail.
        @return:
        """
        assessment = [0, 1]
        module = self.get_module_from_location(self.problem_location, COURSE)

        #Simulate a student saving an answer
        module.handle_ajax("save_answer", {"student_answer": self.answer})
        status = module.handle_ajax("get_status", {})
        self.assertTrue(isinstance(status, basestring))

        #Mock a student submitting an assessment
        assessment_dict = MockQueryDict()
        assessment_dict.update({
            'assessment': sum(assessment),
            'score_list[]': assessment
        })
        module.handle_ajax("save_assessment", assessment_dict)
        task_one_json = json.loads(module.task_states[0])
        self.assertEqual(
            json.loads(task_one_json['child_history'][0]['post_assessment']),
            assessment)
        status = module.handle_ajax("get_status", {})
        self.assertTrue(isinstance(status, basestring))

        #Move to the next step in the problem
        module.handle_ajax("next_problem", {})
        self.assertEqual(module.current_task_number, 0)

        html = module.get_html()
        self.assertTrue(isinstance(html, basestring))

        #Module should now be done
        rubric = module.handle_ajax("get_combined_rubric", {})
        self.assertTrue(isinstance(rubric, basestring))
        self.assertEqual(module.state, "done")

        #Try to reset, should fail because only 1 attempt is allowed
        reset_data = json.loads(module.handle_ajax("reset", {}))
        self.assertEqual(reset_data['success'], False)
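
The 'reset' handler returns a JSON-encoded string, which is why the failure check above calls json.loads before asserting on the reply. A small convenience wrapper (the name parse_ajax is hypothetical) can keep that decode-then-assert pattern in one place:

import json


def parse_ajax(module, dispatch, data):
    """
    Hypothetical test helper: call handle_ajax and decode its JSON reply.
    Only suitable for dispatches that return JSON, such as 'reset' above.
    """
    return json.loads(module.handle_ajax(dispatch, data))
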
Example #4
    def test_open_ended_flow_correct(self):
        """
        Test a two-step problem where the student first goes through the self assessment step and then the
        open ended step.
        @return:
        """
        assessment = [1, 1]
        #Load the module
        module = self.get_module_from_location(self.problem_location, COURSE)

        #Simulate a student saving an answer
        module.handle_ajax("save_answer", {"student_answer": self.answer})
        status = module.handle_ajax("get_status", {})
        self.assertTrue(isinstance(status, basestring))

        #Mock a student submitting an assessment
        assessment_dict = MockQueryDict()
        assessment_dict.update({'assessment': sum(assessment), 'score_list[]': assessment})
        module.handle_ajax("save_assessment", assessment_dict)
        task_one_json = json.loads(module.task_states[0])
        self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
        module.handle_ajax("get_status", {})

        #Move to the next step in the problem
        try:
            module.handle_ajax("next_problem", {})
        except GradingServiceError:
            #This error is okay.  We don't have a grading service to connect to!
            pass
        self.assertEqual(module.current_task_number, 1)
        try:
            module.get_html()
        except GradingServiceError:
            #This error is okay.  We don't have a grading service to connect to!
            pass

        #Try to get the rubric from the module
        module.handle_ajax("get_combined_rubric", {})

        #Make a fake reply from the queue
        queue_reply = {
            'queuekey': "",
            'xqueue_body': json.dumps({
                'score': 0,
                'feedback': json.dumps({"spelling": "Spelling: Ok.", "grammar": "Grammar: Ok.",
                                        "markup-text": " all of us can think of a book that we hope none of our children or any other children have taken off the shelf . but if i have the right to remove that book from the shelf that work i abhor then you also have exactly the same right and so does everyone else . and then we <bg>have no books left</bg> on the shelf for any of us . <bs>katherine</bs> <bs>paterson</bs> , author write a persuasive essay to a newspaper reflecting your vies on censorship <bg>in libraries . do</bg> you believe that certain materials , such as books , music , movies , magazines , <bg>etc . , should be</bg> removed from the shelves if they are found <bg>offensive ? support your</bg> position with convincing arguments from your own experience , observations <bg>, and or reading .</bg> "}),
                'grader_type': "ML",
                'success': True,
                'grader_id': 1,
                'submission_id': 1,
                'rubric_xml': "<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>",
                'rubric_scores_complete': True,
            })
        }

        module.handle_ajax("check_for_score", {})

        #Update the module with the fake queue reply
        module.handle_ajax("score_update", queue_reply)
        self.assertFalse(module.ready_to_reset)
        self.assertEqual(module.current_task_number, 1)

        #Get html and other data client will request
        module.get_html()
        legend = module.handle_ajax("get_legend", {})
        self.assertTrue(isinstance(legend, basestring))

        module.handle_ajax("get_status", {})
        module.handle_ajax("skip_post_assessment", {})
        self.assertTrue(isinstance(legend, basestring))

        #Get all results
        module.handle_ajax("get_results", {})

        #reset the problem
        module.handle_ajax("reset", {})
        self.assertEqual(module.state, "initial")
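
The fake queue reply above shows the shape the 'score_update' handler consumes: a 'queuekey' plus an 'xqueue_body' whose JSON payload carries the score, JSON-encoded feedback, grader metadata, the rubric XML, and a 'rubric_scores_complete' flag. A hedged helper for building such replies in other tests (the name make_queue_reply is hypothetical) might look like this:

import json


def make_queue_reply(score, feedback, rubric_xml, grader_type="ML"):
    """
    Hypothetical test helper: build a fake xqueue reply in the shape used above.

    `feedback` is a plain dict; it is JSON-encoded inside the body, mirroring
    the nested json.dumps in the test.
    """
    return {
        'queuekey': "",
        'xqueue_body': json.dumps({
            'score': score,
            'feedback': json.dumps(feedback),
            'grader_type': grader_type,
            'success': True,
            'grader_id': 1,
            'submission_id': 1,
            'rubric_xml': rubric_xml,
            'rubric_scores_complete': True,
        }),
    }
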
Example #5
class PeerGradingModuleTest(unittest.TestCase, DummyModulestore):
    """
    Test the peer grading xmodule at the unit level.  More detailed tests are difficult, as the module relies on an
    external grading service.
    """
    problem_location = Location(
        ["i4x", "edX", "open_ended", "peergrading", "PeerGradingSample"])
    calibrated_dict = {'location': "blah"}
    save_dict = MockQueryDict()
    save_dict.update({
        'location': "blah",
        'submission_id': 1,
        'submission_key': "",
        'score': 1,
        'feedback': "",
        'rubric_scores[]': [0, 1],
        'submission_flagged': False,
        'answer_unknown': False,
    })

    def setUp(self):
        """
        Create a peer grading module from a test system
        @return:
        """
        self.test_system = get_test_system()
        self.test_system.open_ended_grading_interface = None
        self.setup_modulestore(COURSE)
        self.peer_grading = self.get_module_from_location(
            self.problem_location, COURSE)

    def test_module_closed(self):
        """
        Test if peer grading is closed
        @return:
        """
        closed = self.peer_grading.closed()
        self.assertEqual(closed, False)

    def test_get_html(self):
        """
        Test to see if the module can be rendered
        @return:
        """
        html = self.peer_grading.get_html()
        self.assertTrue(isinstance(html, basestring))

    def test_get_data(self):
        """
        Try getting data from the external grading service
        @return:
        """
        success, data = self.peer_grading.query_data_for_location(
            self.problem_location.url())
        self.assertEqual(success, True)

    def test_get_score(self):
        """
        Test getting the score
        @return:
        """
        score = self.peer_grading.get_score()
        self.assertEqual(score['score'], None)

    def test_get_max_score(self):
        """
        Test getting the max score
        @return:
        """
        max_score = self.peer_grading.max_score()
        self.assertEqual(max_score, None)

    def test_get_next_submission(self):
        """
        Test to see if we can get the next mock submission
        @return:
        """
        success, next_submission = self.peer_grading.get_next_submission(
            {'location': 'blah'})
        self.assertEqual(success, True)

    def test_save_grade(self):
        """
        Test if we can save the grade
        @return:
        """
        response = self.peer_grading.save_grade(self.save_dict)
        self.assertEqual(response['success'], True)

    def test_is_student_calibrated(self):
        """
        Check to see if the student has calibrated yet
        @return:
        """
        response = self.peer_grading.is_student_calibrated(
            self.calibrated_dict)
        self.assertEqual(response['success'], True)

    def test_show_calibration_essay(self):
        """
        Test showing the calibration essay
        @return:
        """
        response = self.peer_grading.show_calibration_essay(
            self.calibrated_dict)
        self.assertEqual(response['success'], True)

    def test_save_calibration_essay(self):
        """
        Test saving the calibration essay
        @return:
        """
        response = self.peer_grading.save_calibration_essay(self.save_dict)
        self.assertEqual(response['success'], True)

    def test_peer_grading_problem(self):
        """
        See if we can render a single problem
        @return:
        """
        response = self.peer_grading.peer_grading_problem(self.calibrated_dict)
        self.assertEqual(response['success'], True)

    def test_get_instance_state(self):
        """
        Get the instance state dict
        @return:
        """
        self.peer_grading.get_instance_state()
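
These cases follow the standard unittest pattern, so the peer grading suite can be run directly; a minimal sketch, assuming the class lives in a module named test_peer_grading:

import unittest

# Hypothetical import path; adjust to wherever PeerGradingModuleTest is defined.
from test_peer_grading import PeerGradingModuleTest

if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(PeerGradingModuleTest)
    unittest.TextTestRunner(verbosity=2).run(suite)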