Example #1
 def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
     """Confirm that expected values are stored in InstructorTask on task failure."""
     instructor_task = InstructorTask.objects.get(id=entry_id)
     self.assertEqual(instructor_task.task_state, FAILURE)
     self.assertEqual(instructor_task.requester.username, 'instructor')
     self.assertEqual(instructor_task.task_type, task_type)
     task_input = json.loads(instructor_task.task_input)
     self.assertNotIn('student', task_input)
     self.assertEqual(task_input['problem_url'], text_type(InstructorTaskModuleTestCase.problem_location(problem_url_name)))
     status = json.loads(instructor_task.task_output)
     self.assertEqual(status['exception'], 'ZeroDivisionError')
     self.assertEqual(status['message'], expected_message)
     # check status returned:
     status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
     self.assertEqual(status['message'], expected_message)
Example #2
 def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
     """Confirm that expected values are stored in InstructorTask on task failure."""
     instructor_task = InstructorTask.objects.get(id=entry_id)
     assert instructor_task.task_state == FAILURE
     assert instructor_task.requester.username == 'instructor'
     assert instructor_task.task_type == task_type
     task_input = json.loads(instructor_task.task_input)
     assert 'student' not in task_input
     assert task_input['problem_url'] == str(InstructorTaskModuleTestCase.problem_location(problem_url_name))
     status = json.loads(instructor_task.task_output)
     assert status['exception'] == 'ZeroDivisionError'
     assert status['message'] == expected_message
     # check status returned:
     status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
     assert status['message'] == expected_message
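
Examples #1 and #2 show the same helper before and after a migration from unittest-style assertion methods to bare assert statements (which pytest rewrites to give equally detailed failure messages). A minimal, self-contained sketch of the equivalence, using an illustrative JSON payload rather than real task output:

import json

def check_failure_output(task_output, expected_message):
    # bare asserts are the pytest-idiomatic spelling of assertEqual/assertNotIn
    status = json.loads(task_output)
    assert status['exception'] == 'ZeroDivisionError'
    assert status['message'] == expected_message
    assert 'student' not in status

check_failure_output(
    '{"exception": "ZeroDivisionError", "message": "boom"}', 'boom')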
Example #3
 def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):
     """Confirm that expected values are stored in InstructorTask on task failure."""
     instructor_task = InstructorTask.objects.get(id=entry_id)
     self.assertEqual(instructor_task.task_state, FAILURE)
     self.assertEqual(instructor_task.requester.username, 'instructor')
     self.assertEqual(instructor_task.task_type, task_type)
     task_input = json.loads(instructor_task.task_input)
     self.assertNotIn('student', task_input)
     self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string())
     status = json.loads(instructor_task.task_output)
     self.assertEqual(status['exception'], 'ZeroDivisionError')
     self.assertEqual(status['message'], expected_message)
     # check status returned:
     status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
     self.assertEqual(status['message'], expected_message)
Example #4
    def test_rescoring_bad_unicode_input(self):
        """Generate a real failure in rescoring a problem, with an answer including unicode"""
        # At one point, the student answers that resulted in StudentInputErrors were being
        # persisted (even though they were not counted as an attempt).  That is not possible
        # now, so it's harder to generate a test for how such input is handled.
        problem_url_name = 'H1P1'
        # set up an option problem -- doesn't matter really what problem it is, but we need
        # it to have an answer.
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name,
                                   [OPTION_1, OPTION_1])

        # return an input error as if it were a numerical response, with an embedded unicode character:
        expected_message = u"Could not interpret '2/3\u03a9' as a number"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers'
                   ) as mock_rescore:
            mock_rescore.side_effect = StudentInputError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers(
                'instructor', problem_url_name)

        # check instructor_task returned
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, 'SUCCESS')
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, 'rescore_problem')
        task_input = json.loads(instructor_task.task_input)
        self.assertNotIn('student', task_input)
        self.assertEqual(
            task_input['problem_url'],
            InstructorTaskModuleTestCase.problem_location(
                problem_url_name).to_deprecated_string())
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['attempted'], 1)
        self.assertEqual(status['succeeded'], 0)
        self.assertEqual(status['total'], 1)
Example #5
 def submit_rescore_all_student_answers(self, instructor, problem_url_name, only_if_higher=False):
     """Submits the particular problem for rescoring"""
     return submit_rescore_problem_for_all_students(
         self.create_task_request(instructor),
         InstructorTaskModuleTestCase.problem_location(problem_url_name),
         only_if_higher,
     )
Example #6
    def test_rescoring_bad_unicode_input(self):
        """Generate a real failure in rescoring a problem, with an answer including unicode"""
        # At one point, the student answers that resulted in StudentInputErrors were being
        # persisted (even though they were not counted as an attempt).  That is not possible
        # now, so it's harder to generate a test for how such input is handled.
        problem_url_name = 'H1P1'
        # set up an option problem -- doesn't matter really what problem it is, but we need
        # it to have an answer.
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        # return an input error as if it were a numerical response, with an embedded unicode character:
        expected_message = u"Could not interpret '2/3\u03a9' as a number"
        with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
            mock_rescore.side_effect = StudentInputError(expected_message)
            instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # check instructor_task returned
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, 'SUCCESS')
        self.assertEqual(instructor_task.requester.username, 'instructor')
        self.assertEqual(instructor_task.task_type, 'rescore_problem')
        task_input = json.loads(instructor_task.task_input)
        self.assertNotIn('student', task_input)
        self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string())
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['attempted'], 1)
        self.assertEqual(status['succeeded'], 0)
        self.assertEqual(status['total'], 1)
Example #7
 def render_problem(self, username, problem_url_name):
     """
     Use ajax interface to request html for a problem.
     """
     # make sure that the requested user is logged in, so that the ajax call works
     # on the right problem:
     self.login_username(username)
     # make ajax call:
     modx_url = reverse(
         'xblock_handler',
         kwargs={
             'course_id': text_type(self.course.id),
             'usage_id': quote_slashes(
                 text_type(InstructorTaskModuleTestCase.problem_location(problem_url_name))),
             'handler': 'xmodule_handler',
             'suffix': 'problem_get',
         })
     resp = self.client.post(modx_url, {})
     return resp
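
Example #7 passes the usage id through quote_slashes before reversing the URL, because a raw usage key contains slashes that would otherwise be consumed by URL routing. A hypothetical stand-in for such a helper, for illustration only (the real edx-platform implementation may encode differently):

def quote_slashes(text):
    # hypothetical: make '/' survive Django URL pattern matching
    return text.replace('/', '%2F')

print(quote_slashes('i4x://org/course/problem/H1P1'))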
Example #8
 def submit_rescore_one_student_answer(self, instructor, problem_url_name,
                                       student):
     """Submits the particular problem for rescoring for a particular student"""
     return submit_rescore_problem_for_student(
         self.create_task_request(instructor),
         InstructorTaskModuleTestCase.problem_location(problem_url_name),
         student)
Example #10
    def test_rescoring_code_problem(self):
        """Run rescore scenario on problem with code submission"""
        problem_url_name = 'H1P2'
        self.define_code_response_problem(problem_url_name)
        # we fully create the CodeResponse problem, but just pretend that we're queuing it:
        with patch('capa.xqueue_interface.XQueueInterface.send_to_queue'
                   ) as mock_send_to_queue:
            mock_send_to_queue.return_value = (0, "Successfully queued")
            self.submit_student_answer('u1', problem_url_name,
                                       ["answer1", "answer2"])

        instructor_task = self.submit_rescore_all_student_answers(
            'instructor', problem_url_name)

        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'NotImplementedError')
        self.assertEqual(status['message'],
                         "Problem's definition does not support rescoring.")

        status = InstructorTaskModuleTestCase.get_task_status(
            instructor_task.task_id)
        self.assertEqual(status['message'],
                         "Problem's definition does not support rescoring.")
Example #11
    def test_rescoring_option_problem(self, problem_edit, new_expected_scores,
                                      new_expected_max):
        """
        Run rescore scenario on option problem.
        Verify rescoring updates grade after content change.
        Original problem definition has:
            num_inputs = 1
            num_responses = 2
            correct_answer = OPTION_1
        """
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(
            problem_url_name)
        descriptor = self.module_store.get_item(location)

        # first store answers for each of the separate users:
        self.submit_student_answer('u1', problem_url_name,
                                   [OPTION_1, OPTION_1])
        self.submit_student_answer('u2', problem_url_name,
                                   [OPTION_1, OPTION_2])
        self.submit_student_answer('u3', problem_url_name,
                                   [OPTION_2, OPTION_1])
        self.submit_student_answer('u4', problem_url_name,
                                   [OPTION_2, OPTION_2])

        # verify each user's grade
        expected_original_scores = (2, 1, 1, 0)
        expected_original_max = 2
        for i, user in enumerate(self.users):
            self.check_state(user, descriptor, expected_original_scores[i],
                             expected_original_max)

        # update the data in the problem definition so the answer changes.
        self.redefine_option_problem(problem_url_name, **problem_edit)

        # confirm that simply rendering the problem again does not change the grade
        self.render_problem('u1', problem_url_name)
        self.check_state(self.user1, descriptor, expected_original_scores[0],
                         expected_original_max)

        # rescore the problem for only one student -- only that student's grade should change:
        self.submit_rescore_one_student_answer('instructor', problem_url_name,
                                               self.user1)
        self.check_state(self.user1, descriptor, new_expected_scores[0],
                         new_expected_max)
        for i, user in enumerate(self.users[1:],
                                 start=1):  # everyone other than user1
            self.check_state(user, descriptor, expected_original_scores[i],
                             expected_original_max)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)
        for i, user in enumerate(self.users):
            self.check_state(user, descriptor, new_expected_scores[i],
                             new_expected_max)
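
test_rescoring_option_problem in Example #11 receives problem_edit, new_expected_scores, and new_expected_max as parameters, which suggests a data-driven test; the decorators are not shown in the snippet. A hedged sketch of how such rows are commonly supplied with the ddt package (the data values here are illustrative, not taken from the test module):

import unittest
import ddt

@ddt.ddt
class RescoreParamsExample(unittest.TestCase):
    # hypothetical rows: (problem_edit, new_expected_scores, new_expected_max)
    @ddt.data(
        (dict(correct_answer='Option 2'), (0, 1, 1, 2), 2),
        (dict(num_inputs=2), (2, 1, 1, 0), 4),
    )
    @ddt.unpack
    def test_row_shapes(self, problem_edit, new_expected_scores, new_expected_max):
        self.assertEqual(len(new_expected_scores), 4)

if __name__ == '__main__':
    unittest.main()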
Example #12
 def _test_submit_with_long_url(self, task_function, student=None):
     problem_url_name = 'x' * 255
     self.define_option_problem(problem_url_name)
     location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
     with self.assertRaises(ValueError):
         if student is not None:
             task_function(self.create_task_request(self.instructor), location, student)
         else:
             task_function(self.create_task_request(self.instructor), location)
Example #14
    def test_rescoring_randomized_problem(self):
        """Run rescore scenario on custom problem that uses randomize"""
        # First define the custom response problem:
        problem_url_name = 'H1P1'
        self.define_randomized_custom_response_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(
            problem_url_name)
        descriptor = self.module_store.get_item(location)
        # run with more than one user
        for user in self.users:
            # first render the problem, so that a seed will be created for this user
            self.render_problem(user.username, problem_url_name)
            # submit a bogus answer, in order to get the problem to tell us its real answer
            dummy_answer = "1000"
            self.submit_student_answer(user.username, problem_url_name,
                                       [dummy_answer, dummy_answer])
            # we should have gotten the problem wrong, since we're way out of range:
            self.check_state(user, descriptor, 0, 1, expected_attempts=1)
            # dig the correct answer out of the problem's message
            module = self.get_student_module(user.username, descriptor)
            state = json.loads(module.state)
            correct_map = state['correct_map']
            log.info("Correct Map: %s", correct_map)
            # only one response, so pull it out:
            answer = list(correct_map.values())[0]['msg']  # list() so this works on Python 3
            self.submit_student_answer(user.username, problem_url_name,
                                       [answer, answer])
            # we should now get the problem right, with a second attempt:
            self.check_state(user, descriptor, 1, 1, expected_attempts=2)

        # redefine the problem (as stored in Mongo) so that the definition of correct changes
        self.define_randomized_custom_response_problem(problem_url_name,
                                                       redefine=True)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade (or the attempts):
        self.render_problem('u1', problem_url_name)
        self.check_state(self.user1, descriptor, 1, 1, expected_attempts=2)

        # rescore the problem for only one student -- only that student's grade should change
        # (and none of the attempts):
        self.submit_rescore_one_student_answer('instructor', problem_url_name,
                                               User.objects.get(username='u1'))
        for user in self.users:
            expected_score = 0 if user.username == 'u1' else 1
            self.check_state(user,
                             descriptor,
                             expected_score,
                             1,
                             expected_attempts=2)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # all grades should change to being wrong (with no change in attempts)
        for user in self.users:
            self.check_state(user, descriptor, 0, 1, expected_attempts=2)
Example #15
    def verify_rescore_results(self, problem_edit, new_expected_scores,
                               new_expected_max, rescore_if_higher):
        """
        Common helper to verify the results of rescoring for a single
        student and all students are as expected.
        """
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(
            problem_url_name)
        descriptor = self.module_store.get_item(location)

        # first store answers for each of the separate users:
        self.submit_student_answer('u1', problem_url_name,
                                   [OPTION_1, OPTION_1])
        self.submit_student_answer('u2', problem_url_name,
                                   [OPTION_1, OPTION_2])
        self.submit_student_answer('u3', problem_url_name,
                                   [OPTION_2, OPTION_1])
        self.submit_student_answer('u4', problem_url_name,
                                   [OPTION_2, OPTION_2])

        # verify each user's grade
        expected_original_scores = (2, 1, 1, 0)
        expected_original_max = 2
        for i, user in enumerate(self.users):
            self.check_state(user, descriptor, expected_original_scores[i],
                             expected_original_max)

        # update the data in the problem definition so the answer changes.
        self.redefine_option_problem(problem_url_name, **problem_edit)

        # confirm that simply rendering the problem again does not change the grade
        self.render_problem('u1', problem_url_name)
        self.check_state(self.user1, descriptor, expected_original_scores[0],
                         expected_original_max)

        # rescore the problem for only one student -- only that student's grade should change:
        self.submit_rescore_one_student_answer('instructor', problem_url_name,
                                               self.user1, rescore_if_higher)
        self.check_state(self.user1, descriptor, new_expected_scores[0],
                         new_expected_max)
        for i, user in enumerate(self.users[1:],
                                 start=1):  # everyone other than user1
            self.check_state(user, descriptor, expected_original_scores[i],
                             expected_original_max)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name,
                                                rescore_if_higher)
        for i, user in enumerate(self.users):
            self.check_state(user, descriptor, new_expected_scores[i],
                             new_expected_max)
Example #16
    def test_delete_failure(self):
        """Simulate a failure in deleting state of a problem"""
        problem_url_name = 'H1P1'
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('courseware.models.StudentModule.delete') as mock_delete:
            mock_delete.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.delete_problem_state('instructor', location)
        self._assert_task_failure(instructor_task.id, 'delete_problem_state', problem_url_name, expected_message)
Example #18
 def test_submit_nonexistent_modules(self):
     # confirm that a rescore of a non-existent module returns an exception
     problem_url = InstructorTaskModuleTestCase.problem_location("NonexistentProblem")
     request = None
     with pytest.raises(ItemNotFoundError):
         submit_rescore_problem_for_student(request, problem_url, self.student)
     with pytest.raises(ItemNotFoundError):
         submit_rescore_problem_for_all_students(request, problem_url)
     with pytest.raises(ItemNotFoundError):
         submit_reset_problem_attempts_for_all_students(request, problem_url)
     with pytest.raises(ItemNotFoundError):
         submit_delete_problem_state_for_all_students(request, problem_url)
Example #19
    def test_reset_failure(self):
        """Simulate a failure in resetting attempts on a problem"""
        problem_url_name = 'H1P1'
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        self.define_option_problem(problem_url_name)
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

        expected_message = "bad things happened"
        with patch('lms.djangoapps.courseware.models.StudentModule.save') as mock_save:
            mock_save.side_effect = ZeroDivisionError(expected_message)
            instructor_task = self.reset_problem_attempts('instructor', location)
        self._assert_task_failure(instructor_task.id, 'reset_problem_attempts', problem_url_name, expected_message)
Example #20
 def test_submit_nonexistent_modules(self):
     # confirm that a rescore of a non-existent module returns an exception
     problem_url = InstructorTaskModuleTestCase.problem_location("NonexistentProblem")
     request = None
     with self.assertRaises(ItemNotFoundError):
         submit_rescore_problem_for_student(request, problem_url, self.student)
     with self.assertRaises(ItemNotFoundError):
         submit_rescore_problem_for_all_students(request, problem_url)
     with self.assertRaises(ItemNotFoundError):
         submit_reset_problem_attempts_for_all_students(request, problem_url)
     with self.assertRaises(ItemNotFoundError):
         submit_delete_problem_state_for_all_students(request, problem_url)
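
Examples #18 and #20 are the same test written with pytest.raises and self.assertRaises respectively; both are context managers that pass only when the named exception is raised inside the block. Since the four submit calls differ only in the function under test, the repetition can also be collapsed into a loop, sketched here with stand-in stubs (the real ItemNotFoundError lives in the modulestore package):

import pytest

class ItemNotFoundError(Exception):
    # stand-in for the modulestore's ItemNotFoundError
    pass

def submit_rescore(request, problem_url):
    raise ItemNotFoundError(problem_url)

def submit_reset(request, problem_url):
    raise ItemNotFoundError(problem_url)

for task_function in (submit_rescore, submit_reset):
    with pytest.raises(ItemNotFoundError):
        task_function(None, 'NonexistentProblem')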
Example #21
 def render_problem(self, username, problem_url_name):
     """
     Use ajax interface to request html for a problem.
     """
     # make sure that the requested user is logged in, so that the ajax call works
     # on the right problem:
     self.login_username(username)
     # make ajax call:
     modx_url = reverse('xblock_handler', kwargs={
         'course_id': self.course.id.to_deprecated_string(),
         'usage_id': quote_slashes(InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string()),
         'handler': 'xmodule_handler',
         'suffix': 'problem_get',
     })
     resp = self.client.post(modx_url, {})
     return resp
Example #22
    def test_rescoring_randomized_problem(self):
        """Run rescore scenario on custom problem that uses randomize"""
        # First define the custom response problem:
        problem_url_name = 'H1P1'
        self.define_randomized_custom_response_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)
        # run with more than one user
        for user in self.users:
            # first render the problem, so that a seed will be created for this user
            self.render_problem(user.username, problem_url_name)
            # submit a bogus answer, in order to get the problem to tell us its real answer
            dummy_answer = "1000"
            self.submit_student_answer(user.username, problem_url_name, [dummy_answer, dummy_answer])
            # we should have gotten the problem wrong, since we're way out of range:
            self.check_state(user, descriptor, 0, 1, expected_attempts=1)
            # dig the correct answer out of the problem's message
            module = self.get_student_module(user.username, descriptor)
            state = json.loads(module.state)
            correct_map = state['correct_map']
            log.info("Correct Map: %s", correct_map)
            # only one response, so pull it out:
            answer = list(correct_map.values())[0]['msg']  # list() so this works on Python 3
            self.submit_student_answer(user.username, problem_url_name, [answer, answer])
            # we should now get the problem right, with a second attempt:
            self.check_state(user, descriptor, 1, 1, expected_attempts=2)

        # redefine the problem (as stored in Mongo) so that the definition of correct changes
        self.define_randomized_custom_response_problem(problem_url_name, redefine=True)
        # confirm that simply rendering the problem again does not result in a change
        # in the grade (or the attempts):
        self.render_problem('u1', problem_url_name)
        self.check_state(self.user1, descriptor, 1, 1, expected_attempts=2)

        # rescore the problem for only one student -- only that student's grade should change
        # (and none of the attempts):
        self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))
        for user in self.users:
            expected_score = 0 if user.username == 'u1' else 1
            self.check_state(user, descriptor, expected_score, 1, expected_attempts=2)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name)

        # all grades should change to being wrong (with no change in attempts)
        for user in self.users:
            self.check_state(user, descriptor, 0, 1, expected_attempts=2)
Example #23
 def test_delete_problem_state(self):
     """Run delete-state scenario on option problem"""
     # get descriptor:
     problem_url_name = 'H1P1'
     self.define_option_problem(problem_url_name)
     location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
     descriptor = self.module_store.get_item(location)
     # first store answers for each of the separate users:
     for username in self.userlist:
         self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])
     # confirm that state exists:
     for username in self.userlist:
         self.assertIsNotNone(self.get_student_module(username, descriptor))
     # run delete task:
     self.delete_problem_state('instructor', location)
     # confirm that no state can be found:
     for username in self.userlist:
         with self.assertRaises(StudentModule.DoesNotExist):
             self.get_student_module(username, descriptor)
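
The delete-state check relies on Django's model-specific DoesNotExist exception: once the rows are gone, a lookup raises StudentModule.DoesNotExist rather than returning None. A minimal sketch of the same pattern with a stand-in class instead of a real Django model:

import unittest

class StudentModule:
    class DoesNotExist(Exception):
        pass

    _rows = {}

    @classmethod
    def get(cls, username):
        try:
            return cls._rows[username]
        except KeyError:
            raise cls.DoesNotExist(username)

class DeleteStateExample(unittest.TestCase):
    def test_lookup_after_delete(self):
        with self.assertRaises(StudentModule.DoesNotExist):
            StudentModule.get('u1')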
Example #24
    def test_rescoring_code_problem(self):
        """Run rescore scenario on problem with code submission"""
        problem_url_name = 'H1P2'
        self.define_code_response_problem(problem_url_name)
        # we fully create the CodeResponse problem, but just pretend that we're queuing it:
        with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
            mock_send_to_queue.return_value = (0, "Successfully queued")
            self.submit_student_answer('u1', problem_url_name, ["answer1", "answer2"])

        instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        self.assertEqual(instructor_task.task_state, FAILURE)
        status = json.loads(instructor_task.task_output)
        self.assertEqual(status['exception'], 'NotImplementedError')
        self.assertEqual(status['message'], "Problem's definition does not support rescoring.")

        status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)
        self.assertEqual(status['message'], "Problem's definition does not support rescoring.")
Example #26
    def test_submit_task(self, task_function, expected_task_type, params=None):
        """
        Tests submission of instructor task.
        """
        if params is None:
            params = {}
        if params.get('student'):
            params['student'] = self.student

        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(
            problem_url_name)

        # unsuccessful submission, exception raised while submitting.
        with patch(
                'lms.djangoapps.instructor_task.tasks_base.BaseInstructorTask.apply_async'
        ) as apply_async:

            error = Exception()
            apply_async.side_effect = error

            with pytest.raises(QueueConnectionError):
                instructor_task = task_function(
                    self.create_task_request(self.instructor), location,
                    **params)

            most_recent_task = InstructorTask.objects.latest('id')
            assert most_recent_task.task_state == FAILURE

        # successful submission
        instructor_task = task_function(
            self.create_task_request(self.instructor), location, **params)
        assert instructor_task.task_type == expected_task_type

        # test resubmitting, by updating the existing record:
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        instructor_task.task_state = PROGRESS
        instructor_task.save()

        with pytest.raises(AlreadyRunningError):
            task_function(self.create_task_request(self.instructor), location,
                          **params)
Example #27
    def define_randomized_custom_response_problem(self,
                                                  problem_url_name,
                                                  redefine=False):
        """
        Defines a custom response problem that uses a random value to determine correctness.

        Generated answer is also returned as the `msg`, so that the value can be used as a
        correct answer by a test.

        If the `redefine` flag is set, then change the definition of correctness (from equals
        to not-equals).
        """
        factory = CustomResponseXMLFactory()
        script = textwrap.dedent("""
                def check_func(expect, answer_given):
                    expected = str(random.randint(0, 100))
                    return {'ok': answer_given %s expected, 'msg': expected}
            """ % ('!=' if redefine else '=='))
        problem_xml = factory.build_xml(script=script,
                                        cfn="check_func",
                                        expect="42",
                                        num_responses=1)
        if redefine:
            descriptor = self.module_store.get_item(
                InstructorTaskModuleTestCase.problem_location(
                    problem_url_name))
            descriptor.data = problem_xml
            with self.module_store.branch_setting(
                    ModuleStoreEnum.Branch.draft_preferred,
                    descriptor.location.course_key):
                self.module_store.update_item(descriptor, self.user.id)
                self.module_store.publish(descriptor.location, self.user.id)
        else:
            # Use "per-student" rerandomization so that check-problem can be called more than once.
            # Using "always" means we cannot check a problem twice, but we want to call once to get the
            # correct answer, and call a second time with that answer to confirm it's graded as correct.
            # Per-student rerandomization will at least generate different seeds for different users, so
            # we get a little more test coverage.
            ItemFactory.create(parent_location=self.problem_section.location,
                               category="problem",
                               display_name=str(problem_url_name),
                               data=problem_xml,
                               metadata={"rerandomize": "per_student"})
Example #28
    def test_rescoring_if_higher_scores_equal(self):
        """
        Specifically tests rescore when the previous and new raw scores are equal. In this case, the scores should
        be updated.
        """
        problem_edit = dict(
            num_inputs=2
        )  # this change to the problem means the problem will now have a max score of 4
        unchanged_max = 2
        new_max = 4
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(
            problem_url_name)
        descriptor = self.module_store.get_item(location)

        # first store answers for each of the separate users:
        self.submit_student_answer('u1', problem_url_name,
                                   [OPTION_1, OPTION_1])
        self.submit_student_answer('u2', problem_url_name,
                                   [OPTION_2, OPTION_2])

        # verify each user's grade
        self.check_state(self.user1, descriptor, 2, 2)  # user 1 has a 2/2
        self.check_state(self.user2, descriptor, 0, 2)  # user 2 has a 0/2

        # update the data in the problem definition so the answer changes.
        self.redefine_option_problem(problem_url_name, **problem_edit)

        # confirm that simply rendering the problem again does not change the grade
        self.render_problem('u1', problem_url_name)
        self.check_state(self.user1, descriptor, 2, 2)
        self.check_state(self.user2, descriptor, 0, 2)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name,
                                                True)

        # user 1's score would go down, so it remains 2/2. user 2's score was 0/2, which is equivalent to the new score
        # of 0/4, so user 2's score changes to 0/4.
        self.check_state(self.user1, descriptor, 2, unchanged_max)
        self.check_state(self.user2, descriptor, 0, new_max)
Example #29
    def _test_submit_task(self, task_function, student=None):
        # tests submit, and then tests a second identical submission.
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        if student is not None:
            instructor_task = task_function(self.create_task_request(self.instructor), location, student)
        else:
            instructor_task = task_function(self.create_task_request(self.instructor), location)

        # test resubmitting, by updating the existing record:
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        instructor_task.task_state = PROGRESS
        instructor_task.save()

        with self.assertRaises(AlreadyRunningError):
            if student is not None:
                task_function(self.create_task_request(self.instructor), location, student)
            else:
                task_function(self.create_task_request(self.instructor), location)
Example #30
    def test_submit_task(self, task_function, expected_task_type, params=None):
        if params is None:
            params = {}
        if params.get('student'):
            params['student'] = self.student

        # tests submit, and then tests a second identical submission.
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        instructor_task = task_function(self.create_task_request(self.instructor), location, **params)
        self.assertEqual(instructor_task.task_type, expected_task_type)

        # test resubmitting, by updating the existing record:
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        instructor_task.task_state = PROGRESS
        instructor_task.save()

        with self.assertRaises(AlreadyRunningError):
            task_function(self.create_task_request(self.instructor), location, **params)
Example #31
    def test_reset_attempts_on_problem(self):
        """Run reset-attempts scenario on option problem"""
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)
        num_attempts = 3
        # first store answers for each of the separate users:
        for _ in range(num_attempts):
            for username in self.userlist:
                self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])

        for username in self.userlist:
            self.assertEqual(self.get_num_attempts(username, descriptor), num_attempts)

        self.reset_problem_attempts('instructor', location)

        for username in self.userlist:
            self.assertEqual(self.get_num_attempts(username, descriptor), 0)
Example #33
    def verify_rescore_results(self, problem_edit, new_expected_scores, new_expected_max, rescore_if_higher):
        """
        Common helper to verify the results of rescoring for a single
        student and all students are as expected.
        """
        # get descriptor:
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)

        # first store answers for each of the separate users:
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        self.submit_student_answer('u2', problem_url_name, [OPTION_1, OPTION_2])
        self.submit_student_answer('u3', problem_url_name, [OPTION_2, OPTION_1])
        self.submit_student_answer('u4', problem_url_name, [OPTION_2, OPTION_2])

        # verify each user's grade
        expected_original_scores = (2, 1, 1, 0)
        expected_original_max = 2
        for i, user in enumerate(self.users):
            self.check_state(user, descriptor, expected_original_scores[i], expected_original_max)

        # update the data in the problem definition so the answer changes.
        self.redefine_option_problem(problem_url_name, **problem_edit)

        # confirm that simply rendering the problem again does not change the grade
        self.render_problem('u1', problem_url_name)
        self.check_state(self.user1, descriptor, expected_original_scores[0], expected_original_max)

        # rescore the problem for only one student -- only that student's grade should change:
        self.submit_rescore_one_student_answer('instructor', problem_url_name, self.user1, rescore_if_higher)
        self.check_state(self.user1, descriptor, new_expected_scores[0], new_expected_max)
        for i, user in enumerate(self.users[1:], start=1):  # everyone other than user1
            self.check_state(user, descriptor, expected_original_scores[i], expected_original_max)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name, rescore_if_higher)
        for i, user in enumerate(self.users):
            self.check_state(user, descriptor, new_expected_scores[i], new_expected_max)
Example #34
    def test_submit_task(self, task_function, expected_task_type, params=None):
        """
        Tests submission of instructor task.
        """
        if params is None:
            params = {}
        if params.get('student'):
            params['student'] = self.student

        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)

        # unsuccessful submission, exception raised while submitting.
        with patch('lms.djangoapps.instructor_task.tasks_base.BaseInstructorTask.apply_async') as apply_async:

            error = Exception()
            apply_async.side_effect = error

            with self.assertRaises(QueueConnectionError):
                instructor_task = task_function(self.create_task_request(self.instructor), location, **params)

            most_recent_task = InstructorTask.objects.latest('id')
            self.assertEqual(most_recent_task.task_state, FAILURE)

        # successful submission
        instructor_task = task_function(self.create_task_request(self.instructor), location, **params)
        self.assertEqual(instructor_task.task_type, expected_task_type)

        # test resubmitting, by updating the existing record:
        instructor_task = InstructorTask.objects.get(id=instructor_task.id)
        instructor_task.task_state = PROGRESS
        instructor_task.save()

        with self.assertRaises(AlreadyRunningError):
            task_function(self.create_task_request(self.instructor), location, **params)
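
Example #34 patches apply_async so that it raises, then expects QueueConnectionError from the submit call and a FAILURE state on the latest InstructorTask row. The production wrapper is not shown in these snippets; a hedged sketch of the shape the test implies, with illustrative names only:

class QueueConnectionError(Exception):
    pass

def submit_task(entry, apply_async):
    # hypothetical: mirror the behavior the test asserts -- on any enqueue
    # failure, record FAILURE on the InstructorTask row and re-raise a
    # queue-specific error for the caller.
    try:
        apply_async()
    except Exception as exc:
        entry.task_state = 'FAILURE'
        entry.save()
        raise QueueConnectionError(str(exc))
    return entry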
Example #35
    def define_randomized_custom_response_problem(self, problem_url_name, redefine=False):
        """
        Defines a custom response problem that uses a random value to determine correctness.

        Generated answer is also returned as the `msg`, so that the value can be used as a
        correct answer by a test.

        If the `redefine` flag is set, then change the definition of correctness (from equals
        to not-equals).
        """
        factory = CustomResponseXMLFactory()
        script = textwrap.dedent("""
                def check_func(expect, answer_given):
                    expected = str(random.randint(0, 100))
                    return {'ok': answer_given %s expected, 'msg': expected}
            """ % ('!=' if redefine else '=='))
        problem_xml = factory.build_xml(script=script, cfn="check_func", expect="42", num_responses=1)
        if redefine:
            descriptor = self.module_store.get_item(
                InstructorTaskModuleTestCase.problem_location(problem_url_name)
            )
            descriptor.data = problem_xml
            with self.module_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, descriptor.location.course_key):
                self.module_store.update_item(descriptor, self.user.id)
                self.module_store.publish(descriptor.location, self.user.id)
        else:
            # Use "per-student" rerandomization so that check-problem can be called more than once.
            # Using "always" means we cannot check a problem twice, but we want to call once to get the
            # correct answer, and call a second time with that answer to confirm it's graded as correct.
            # Per-student rerandomization will at least generate different seeds for different users, so
            # we get a little more test coverage.
            ItemFactory.create(parent_location=self.problem_section.location,
                               category="problem",
                               display_name=str(problem_url_name),
                               data=problem_xml,
                               metadata={"rerandomize": "per_student"})
Example #36
    def test_rescoring_if_higher_scores_equal(self):
        """
        Specifically tests rescore when the previous and new raw scores are equal. In this case, the scores should
        be updated.
        """
        problem_edit = dict(num_inputs=2)  # this change to the problem means the problem will now have a max score of 4
        unchanged_max = 2
        new_max = 4
        problem_url_name = 'H1P1'
        self.define_option_problem(problem_url_name)
        location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
        descriptor = self.module_store.get_item(location)

        # first store answers for each of the separate users:
        self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])
        self.submit_student_answer('u2', problem_url_name, [OPTION_2, OPTION_2])

        # verify each user's grade
        self.check_state(self.user1, descriptor, 2, 2)  # user 1 has a 2/2
        self.check_state(self.user2, descriptor, 0, 2)  # user 2 has a 0/2

        # update the data in the problem definition so the answer changes.
        self.redefine_option_problem(problem_url_name, **problem_edit)

        # confirm that simply rendering the problem again does not change the grade
        self.render_problem('u1', problem_url_name)
        self.check_state(self.user1, descriptor, 2, 2)
        self.check_state(self.user2, descriptor, 0, 2)

        # rescore the problem for all students
        self.submit_rescore_all_student_answers('instructor', problem_url_name, True)

        # user 1's score would go down, so it remains 2/2. user 2's score was 0/2, which is equivalent to the new score
        # of 0/4, so user 2's score changes to 0/4.
        self.check_state(self.user1, descriptor, 2, unchanged_max)
        self.check_state(self.user2, descriptor, 0, new_max)
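
The outcome this test asserts (2/2 kept, 0/2 replaced by 0/4) is consistent with an only-if-higher rule that compares score fractions and updates on greater-than-or-equal. A hypothetical sketch of that comparison, not the actual grading code:

def rescore_if_higher(old_score, old_max, new_score, new_max):
    # hypothetical: update only when the new fraction is at least the old one
    if new_score / new_max >= old_score / old_max:
        return new_score, new_max
    return old_score, old_max

assert rescore_if_higher(2, 2, 1, 4) == (2, 2)   # would go down: keep 2/2
assert rescore_if_higher(0, 2, 0, 4) == (0, 4)   # equal (0.0): update to 0/4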