def test_successful_result_too_long(self):
    """An oversized task output is replaced by a truncated failure record in the table."""
    # While we don't expect the existing tasks to generate output that is too
    # long, we can test the framework will handle such an occurrence.
    task_entry = self._create_input_entry()
    self.define_option_problem(PROBLEM_URL_NAME)
    # An action name this long forces the serialized task output over the limit.
    action_name = 'x' * 1000
    # Signature-agnostic no-op update function.  The original used Python-2-only
    # tuple-parameter unpacking (`lambda (a, b, c): True`), which is a
    # SyntaxError on Python 3; `*_args` accepts either calling convention.
    update_fcn = lambda *_args: True
    task_function = (
        lambda entry_id, xmodule_instance_args:
        update_problem_module_state(
            entry_id, update_fcn, action_name,
            filter_fcn=None,
            xmodule_instance_args=None,
        )
    )
    with self.assertRaises(ValueError):
        self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id)
    # Compare with entry in table: the stored output must be the short failure
    # record (exception + message, no traceback), not the oversized payload.
    entry = InstructorTask.objects.get(id=task_entry.id)
    self.assertEqual(entry.task_state, FAILURE)
    self.assertGreater(1023, len(entry.task_output))
    output = json.loads(entry.task_output)
    self.assertEqual(output['exception'], 'ValueError')
    self.assertIn("Length of task output is too long", output['message'])
    self.assertNotIn('traceback', output)
def rescore_problem(entry_id, xmodule_instance_args):
    """Rescores a problem in a course, for all students or one specific student.

    `entry_id` is the id value of the InstructorTask entry that corresponds to this task.
    The entry contains the `course_id` that identifies the course, as well as the
    `task_input`, which contains task-specific input.

    The task_input should be a dict with the following entries:

      'problem_url': the full URL to the problem to be rescored.  (required)

      'student': the identifier (username or email) of a particular user whose
          problem submission should be rescored.  If not specified, all problem
          submissions for the problem will be rescored.

    `xmodule_instance_args` provides information needed by
    _get_module_instance_for_task() to instantiate an xmodule instance.
    """
    action_name = 'rescored'
    update_fcn = rescore_problem_module_state
    # Only rescore modules whose state records an answered submission
    # ('"done": true').  Plain single-parameter lambda: the original
    # parenthesized form (`lambda (x): ...`) is Python-2-only syntax and a
    # SyntaxError on Python 3.
    filter_fcn = lambda modules_to_update: modules_to_update.filter(
        state__contains='"done": true'
    )
    return update_problem_module_state(
        entry_id, update_fcn, action_name,
        filter_fcn=filter_fcn,
        xmodule_instance_args=xmodule_instance_args,
    )
def delete_problem_state(entry_id, xmodule_instance_args):
    """Deletes problem state entirely for all students on a particular problem in a course.

    `entry_id` is the id value of the InstructorTask entry that corresponds to this task.
    The entry contains the `course_id` that identifies the course, as well as the
    `task_input`, which contains task-specific input.

    The task_input should be a dict with the following entries:

      'problem_url': the full URL to the problem to be rescored.  (required)

    `xmodule_instance_args` provides information needed by
    _get_module_instance_for_task() to instantiate an xmodule instance.
    """
    # Delegate directly: delete_problem_module_state is applied to every
    # matching module (no filter), recorded under the 'deleted' action name.
    return update_problem_module_state(
        entry_id,
        delete_problem_module_state,
        'deleted',
        filter_fcn=None,
        xmodule_instance_args=xmodule_instance_args,
    )
def delete_problem_state(entry_id, xmodule_instance_args):
    """Deletes problem state entirely for all students on a particular problem in a course.

    `entry_id` is the id value of the InstructorTask entry that corresponds to this task.
    The entry contains the `course_id` that identifies the course, as well as the
    `task_input`, which contains task-specific input.

    The task_input should be a dict with the following entries:

      'problem_url': the full URL to the problem to be rescored.  (required)

    `xmodule_instance_args` provides information needed by
    _get_module_instance_for_task() to instantiate an xmodule instance.
    """
    # No filter function: state is removed for every matching student module.
    name_of_action = 'deleted'
    state_updater = delete_problem_module_state
    return update_problem_module_state(
        entry_id,
        state_updater,
        name_of_action,
        filter_fcn=None,
        xmodule_instance_args=xmodule_instance_args,
    )
def test_successful_result_too_long(self):
    """Over-long task output is stored as a compact ValueError record, not the raw payload."""
    # While we don't expect the existing tasks to generate output that is too
    # long, we can test the framework will handle such an occurrence.
    task_entry = self._create_input_entry()
    self.define_option_problem(PROBLEM_URL_NAME)
    # 1000-char action name pushes the serialized output past the storage limit.
    action_name = 'x' * 1000
    # No-op update function.  The original `lambda(_a, _b, _c): True` relied on
    # Python-2-only tuple-parameter unpacking (SyntaxError under Python 3);
    # `*_args` preserves behavior under either calling convention.
    update_fcn = lambda *_args: True
    task_function = (
        lambda entry_id, xmodule_instance_args:
        update_problem_module_state(
            entry_id, update_fcn, action_name,
            filter_fcn=None,
            xmodule_instance_args=None,
        )
    )
    with self.assertRaises(ValueError):
        self._run_task_with_mock_celery(task_function, task_entry.id, task_entry.task_id)
    # Compare with entry in table: expect a short failure record with the
    # exception name and message but no traceback.
    entry = InstructorTask.objects.get(id=task_entry.id)
    self.assertEqual(entry.task_state, FAILURE)
    self.assertGreater(1023, len(entry.task_output))
    output = json.loads(entry.task_output)
    self.assertEqual(output['exception'], 'ValueError')
    self.assertIn("Length of task output is too long", output['message'])
    self.assertNotIn('traceback', output)
def rescore_problem(entry_id, xmodule_instance_args):
    """Rescores a problem in a course, for all students or one specific student.

    `entry_id` is the id value of the InstructorTask entry that corresponds to this task.
    The entry contains the `course_id` that identifies the course, as well as the
    `task_input`, which contains task-specific input.

    The task_input should be a dict with the following entries:

      'problem_url': the full URL to the problem to be rescored.  (required)

      'student': the identifier (username or email) of a particular user whose
          problem submission should be rescored.  If not specified, all problem
          submissions for the problem will be rescored.

    `xmodule_instance_args` provides information needed by
    _get_module_instance_for_task() to instantiate an xmodule instance.
    """
    action_name = 'rescored'
    update_fcn = rescore_problem_module_state
    # Restrict rescoring to submissions already marked done.  The lambda is
    # written without parentheses around its parameter: `lambda(x): ...` is
    # Python-2-only syntax and fails to parse on Python 3.
    filter_fcn = lambda modules_to_update: modules_to_update.filter(
        state__contains='"done": true'
    )
    return update_problem_module_state(
        entry_id, update_fcn, action_name,
        filter_fcn=filter_fcn,
        xmodule_instance_args=xmodule_instance_args,
    )