def test_open_ended_display(self): """ Test storing answer with the open ended module. """ # Create a module with no state yet. Important that this start off as a blank slate. test_module = OpenEndedModule(self.test_system, self.location, self.definition, self.descriptor, self.static_data, self.metadata) saved_response = "Saved response." submitted_response = "Submitted response." # Initially, there will be no stored answer. self.assertEqual(test_module.stored_answer, None) # And the initial answer to display will be an empty string. self.assertEqual(test_module.get_display_answer(), "") # Now, store an answer in the module. test_module.handle_ajax("store_answer", {'student_answer' : saved_response}, get_test_system()) # The stored answer should now equal our response. self.assertEqual(test_module.stored_answer, saved_response) self.assertEqual(test_module.get_display_answer(), saved_response) # Mock out the send_to_grader function so it doesn't try to connect to the xqueue. test_module.send_to_grader = Mock(return_value=True) # Submit a student response to the question. test_module.handle_ajax( "save_answer", {"student_answer": submitted_response}, get_test_system() ) # Submitting an answer should clear the stored answer. self.assertEqual(test_module.stored_answer, None) # Confirm that the answer is stored properly. self.assertEqual(test_module.latest_answer(), submitted_response)
def setUp(self):
    """Create the StaticPartitionService under test, backed by a test runtime."""
    super(TestPartitionService, self).setUp()
    partitions = [self.user_partition]
    self.partition_service = StaticPartitionService(
        partitions,
        runtime=get_test_system(),
        track_function=Mock(),
    )
def get_module(descriptor):
    """Mocks module_system get_module function"""
    # Each descriptor gets its own runtime, which in turn resolves further
    # children through this same function.
    runtime = get_test_system(course_id=self.course.location.course_key)
    runtime.get_module = get_module
    runtime.descriptor_runtime = descriptor.runtime
    descriptor.bind_for_student(runtime, descriptor._field_data)  # pylint: disable=protected-access
    return descriptor
def setUp(self):
    """
    Create a peer grading module from a test system.
    """
    system = get_test_system()
    system.open_ended_grading_interface = None
    self.test_system = system
    self.setup_modulestore(COURSE)
def inner_get_module(descriptor):
    """Resolve a Location to its descriptor and attach a fresh test runtime."""
    if isinstance(descriptor, Location):
        # A bare Location was passed in; look up the real descriptor first.
        descriptor = self.modulestore.get_item(descriptor, depth=None)
    runtime = get_test_system()
    runtime.get_module = inner_get_module
    descriptor.xmodule_runtime = runtime
    return descriptor
def get_module(descriptor):
    """Mocks module_system get_module function"""
    # Runtime per descriptor; child lookups recurse through this function.
    runtime = get_test_system(course_id=module.location.course_key)
    runtime.get_module = get_module
    runtime.descriptor_runtime = descriptor._runtime  # pylint: disable=protected-access
    descriptor.bind_for_student(runtime, self.user_id)
    return descriptor
def setUp(self):
    """Build a test runtime plus the ids and fixture XML used by the error-module tests."""
    super(SetupTestErrorModules, self).setUp()
    self.system = get_test_system()
    course_key = SlashSeparatedCourseKey('org', 'course', 'run')
    self.course_id = course_key
    self.location = course_key.make_usage_key('foo', 'bar')
    self.valid_xml = u"<problem>ABC \N{SNOWMAN}</problem>"
    self.error_msg = "Error"
def get_module_system(self, descriptor):
    """Return a test runtime wired with a stubbed xqueue interface."""
    runtime = get_test_system()
    runtime.open_ended_grading_interface = None
    # First call to send_to_queue returns 1, second returns "queued".
    queue_stub = Mock(send_to_queue=Mock(side_effect=[1, "queued"]))
    runtime.xqueue['interface'] = queue_stub
    return runtime
def _set_up_module_system(self, block):
    """
    Sets up the test module system for the given block.
    """
    runtime = get_test_system()
    runtime.descriptor_runtime = block._runtime  # pylint: disable=protected-access
    block.xmodule_runtime = runtime
def setUp(self):
    """Create a test runtime with a stubbed xqueue and load the course fixture."""
    system = get_test_system()
    system.open_ended_grading_interface = None
    # First call to send_to_queue returns 1, second returns "queued".
    system.xqueue['interface'] = Mock(send_to_queue=Mock(side_effect=[1, "queued"]))
    self.test_system = system
    self.setup_modulestore(COURSE)
def setUp(self):
    """Prepare a test runtime and the location/XML fixtures for error-module tests."""
    self.system = get_test_system()
    org, course = "org", "course"
    self.org = org
    self.course = course
    self.location = Location(['i4x', org, course, None, None])
    self.valid_xml = u"<problem>ABC \N{SNOWMAN}</problem>"
    self.error_msg = "Error"
def setUp(self):
    """Build an XML course whose vertical holds two html modules and bind a test runtime."""
    # construct module
    course = xml.CourseFactory.build()
    sequence = xml.SequenceFactory.build(parent=course)
    vertical = xml.VerticalFactory.build(parent=sequence)
    self.course = self.process_xml(course)
    xml.HtmlFactory(parent=vertical, url_name='test-html-1', text=self.test_html_1)
    xml.HtmlFactory(parent=vertical, url_name='test-html-2', text=self.test_html_2)
    # NOTE(review): process_xml is called twice — the second pass picks up the html
    # children added above; confirm the first call is still required.
    self.course = self.process_xml(course)
    course_seq = self.course.get_children()[0]
    self.module_system = get_test_system()

    def get_module(descriptor):
        """Mocks module_system get_module function"""
        module_system = get_test_system()
        module_system.get_module = get_module
        descriptor.bind_for_student(module_system, descriptor._field_data)  # pylint: disable=protected-access
        return descriptor

    self.module_system.get_module = get_module
    self.module_system.descriptor_system = self.course.runtime
    self.course.runtime.export_fs = MemoryFS()
    self.vertical = course_seq.get_children()[0]
    self.vertical.xmodule_runtime = self.module_system
def setUp(self):
    """Build an XML course containing a split_test module, mock the partition/user
    services, and bind the split_test module for a mock student."""
    super(SplitTestModuleTest, self).setUp()
    self.course_id = 'test_org/test_course_number/test_run'
    # construct module
    course = xml.CourseFactory.build()
    sequence = xml.SequenceFactory.build(parent=course)
    split_test = SplitTestModuleFactory(
        parent=sequence,
        attribs={
            'user_partition_id': '0',
            'group_id_to_child': '{"0": "i4x://edX/xml_test_course/html/split_test_cond0", "1": "i4x://edX/xml_test_course/html/split_test_cond1"}'  # pylint: disable=line-too-long
        }
    )
    xml.HtmlFactory(parent=split_test, url_name='split_test_cond0', text='HTML FOR GROUP 0')
    xml.HtmlFactory(parent=split_test, url_name='split_test_cond1', text='HTML FOR GROUP 1')

    self.course = self.process_xml(course)
    self.course_sequence = self.course.get_children()[0]
    self.module_system = get_test_system()
    self.module_system.descriptor_runtime = self.course._runtime  # pylint: disable=protected-access
    self.course.runtime.export_fs = MemoryFS()

    # Create mock partition service, as these tests are running with XML in-memory system.
    self.course.user_partitions = [
        self.user_partition,
        UserPartition(
            MINIMUM_STATIC_PARTITION_ID, 'second_partition', 'Second Partition',
            [
                Group(unicode(MINIMUM_STATIC_PARTITION_ID + 1), 'abel'),
                Group(unicode(MINIMUM_STATIC_PARTITION_ID + 2), 'baker'),
                Group("103", 'charlie')
            ],
            MockUserPartitionScheme()
        )
    ]
    partitions_service = MockPartitionService(
        self.course,
        course_id=self.course.id,
        track_function=Mock(name='track_function'),
    )
    self.module_system._services['partitions'] = partitions_service  # pylint: disable=protected-access

    # Mock user_service user
    user_service = Mock()
    user = Mock(username='******', email='*****@*****.**', is_staff=False, is_active=True)
    user_service._django_user = user
    self.module_system._services['user'] = user_service  # pylint: disable=protected-access

    self.split_test_module = self.course_sequence.get_children()[0]
    self.split_test_module.bind_for_student(
        self.module_system,
        user.id
    )

    # Create mock modulestore for getting the course. Needed for rendering the HTML
    # view, since mock services exist and the rendering code will not short-circuit.
    mocked_modulestore = Mock()
    mocked_modulestore.get_course.return_value = self.course
    self.split_test_module.system.modulestore = mocked_modulestore
def create(system, source_is_error_module=False):
    """
    return a dict of modules: the conditional with a single source and a single child.
    Keys are 'cond_module', 'source_module', and 'child_module'.

    if the source_is_error_module flag is set, create a real ErrorModule for the source.
    """
    descriptor_system = get_test_descriptor_system()

    # construct source descriptor and module:
    source_location = Location(["i4x", "edX", "conditional_test", "problem", "SampleProblem"])
    if source_is_error_module:
        # Make an error descriptor and module
        source_descriptor = NonStaffErrorDescriptor.from_xml(
            'some random xml data',
            system,
            org=source_location.org,
            course=source_location.course,
            error_msg='random error message'
        )
    else:
        source_descriptor = Mock()
        source_descriptor.location = source_location

    source_descriptor.runtime = descriptor_system
    source_descriptor.render = lambda view, context=None: descriptor_system.render(source_descriptor, view, context)

    # construct other descriptors:
    child_descriptor = Mock()
    child_descriptor._xmodule.student_view.return_value.content = u'<p>This is a secret</p>'
    child_descriptor.student_view = child_descriptor._xmodule.student_view
    child_descriptor.displayable_items.return_value = [child_descriptor]
    child_descriptor.runtime = descriptor_system
    child_descriptor.xmodule_runtime = get_test_system()
    child_descriptor.render = lambda view, context=None: descriptor_system.render(child_descriptor, view, context)

    # Let the descriptor system resolve the two known ids to the mocks above.
    descriptor_system.load_item = {'child': child_descriptor, 'source': source_descriptor}.get

    # construct conditional module:
    cond_location = Location(["i4x", "edX", "conditional_test", "conditional", "SampleConditional"])
    field_data = DictFieldData({
        'data': '<conditional/>',
        'xml_attributes': {'attempted': 'true'},
        'children': ['child'],
    })
    cond_descriptor = ConditionalDescriptor(
        descriptor_system,
        field_data,
        ScopeIds(None, None, cond_location, cond_location)
    )
    cond_descriptor.xmodule_runtime = system
    # Identity get_module: descriptors stand in for their own modules in this test.
    system.get_module = lambda desc: desc
    cond_descriptor.get_required_module_descriptors = Mock(return_value=[source_descriptor])

    # return dict:
    return {'cond_module': cond_descriptor,
            'source_module': source_descriptor,
            'child_module': child_descriptor}
def inner_get_module(descriptor):
    """Resolve Locations to descriptors and attach a fresh test runtime."""
    if isinstance(descriptor, Location):
        # A bare Location was passed in; look up the real descriptor first.
        descriptor = self.modulestore.get_item(descriptor, depth=None)
    runtime = get_test_system()
    runtime.descriptor_runtime = descriptor._runtime  # pylint: disable=protected-access
    runtime.get_module = inner_get_module
    descriptor.xmodule_runtime = runtime
    return descriptor
def create(system, source_is_error_module=False):
    """
    return a dict of modules: the conditional with a single source and a single child.
    Keys are 'cond_module', 'source_module', and 'child_module'.

    if the source_is_error_module flag is set, create a real ErrorModule for the source.
    """
    descriptor_system = get_test_descriptor_system()

    # construct source descriptor and module:
    source_location = Location("edX", "conditional_test", "test_run", "problem", "SampleProblem", None)
    if source_is_error_module:
        # Make an error descriptor and module
        source_descriptor = NonStaffErrorDescriptor.from_xml(
            "some random xml data",
            system,
            id_generator=CourseLocationManager(source_location.course_key),
            error_msg="random error message",
        )
    else:
        source_descriptor = Mock(name="source_descriptor")
        source_descriptor.location = source_location

    source_descriptor.runtime = descriptor_system
    source_descriptor.render = lambda view, context=None: descriptor_system.render(source_descriptor, view, context)

    # construct other descriptors:
    child_descriptor = Mock(name="child_descriptor")
    child_descriptor._xmodule.student_view.return_value.content = u"<p>This is a secret</p>"
    child_descriptor.student_view = child_descriptor._xmodule.student_view
    child_descriptor.displayable_items.return_value = [child_descriptor]
    child_descriptor.runtime = descriptor_system
    child_descriptor.xmodule_runtime = get_test_system()
    child_descriptor.render = lambda view, context=None: descriptor_system.render(child_descriptor, view, context)
    child_descriptor.location = source_location.replace(category="html", name="child")

    # Let the descriptor system resolve the two known usage keys to the mocks above.
    descriptor_system.load_item = {
        child_descriptor.location: child_descriptor,
        source_location: source_descriptor,
    }.get

    system.descriptor_runtime = descriptor_system

    # construct conditional module:
    cond_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
    field_data = DictFieldData(
        {"data": "<conditional/>", "xml_attributes": {"attempted": "true"}, "children": [child_descriptor.location]}
    )
    cond_descriptor = ConditionalDescriptor(
        descriptor_system,
        field_data,
        ScopeIds(None, None, cond_location, cond_location)
    )
    cond_descriptor.xmodule_runtime = system
    # Identity get_module: descriptors stand in for their own modules in this test.
    system.get_module = lambda desc: desc
    cond_descriptor.get_required_module_descriptors = Mock(return_value=[source_descriptor])

    # return dict:
    return {"cond_module": cond_descriptor, "source_module": source_descriptor, "child_module": child_descriptor}
def setUp(self): self.course = CourseFactory.create(data=self.COURSE_DATA) # Turn off cache. modulestore().request_cache = None modulestore().metadata_inheritance_cache_subsystem = None chapter = ItemFactory.create( parent_location=self.course.location, category="sequential", ) section = ItemFactory.create( parent_location=chapter.location, category="sequential" ) # username = robot{0}, password = '******' self.users = [ UserFactory.create(username='******' % i, email='*****@*****.**' % i) for i in range(self.USER_COUNT) ] for user in self.users: CourseEnrollmentFactory.create(user=user, course_id=self.course.id) self.item_descriptor = ItemFactory.create( parent_location=section.location, category=self.CATEGORY, data=self.DATA ) self.runtime = get_test_system() # Allow us to assert that the template was called in the same way from # different code paths while maintaining the type returned by render_template self.runtime.render_template = lambda template, context: u'{!r}, {!r}'.format(template, sorted(context.items())) model_data = {'location': self.item_descriptor.location} model_data.update(self.MODEL_DATA) self.item_module = self.item_descriptor.module_class( self.runtime, self.item_descriptor, model_data ) self.item_url = Location(self.item_module.location).url() # login all users for acces to Xmodule self.clients = {user.username: Client() for user in self.users} self.login_statuses = [ self.clients[user.username].login( username=user.username, password='******') for user in self.users ] self.assertTrue(all(self.login_statuses))
def setUp(self):
    """
    Create a peer grading module from a test system
    @return:
    """
    system = get_test_system()
    system.open_ended_grading_interface = None
    self.test_system = system
    self.setup_modulestore(COURSE)
    self.peer_grading = self.get_module_from_location(self.pgm_location, COURSE)
def create():
    """Method return Video Xmodule instance."""
    location = Location(["i4x", "edX", "video", "default", "SampleProblem1"])
    system = get_test_descriptor_system()
    descriptor = VideoDescriptor(
        system,
        DictFieldData({'data': VideoFactory.sample_problem_xml_youtube, 'location': location}),
        ScopeIds(None, None, None, None),
    )
    descriptor.xmodule_runtime = get_test_system()
    return descriptor
def setUp(self): self.course_id = "test_org/test_course_number/test_run" # construct module course = xml.CourseFactory.build() sequence = xml.SequenceFactory.build(parent=course) split_test = SplitTestModuleFactory( parent=sequence, attribs={ "user_partition_id": "0", "group_id_to_child": '{"0": "i4x://edX/xml_test_course/html/split_test_cond0", "1": "i4x://edX/xml_test_course/html/split_test_cond1"}', }, ) xml.HtmlFactory(parent=split_test, url_name="split_test_cond0", text="HTML FOR GROUP 0") xml.HtmlFactory(parent=split_test, url_name="split_test_cond1", text="HTML FOR GROUP 1") self.course = self.process_xml(course) self.course_sequence = self.course.get_children()[0] self.module_system = get_test_system() def get_module(descriptor): """Mocks module_system get_module function""" module_system = get_test_system() module_system.get_module = get_module descriptor.bind_for_student(module_system, descriptor._field_data) # pylint: disable=protected-access return descriptor self.module_system.get_module = get_module self.module_system.descriptor_system = self.course.runtime self.course.runtime.export_fs = MemoryFS() self.tags_service = MemoryUserTagsService() self.module_system._services["user_tags"] = self.tags_service # pylint: disable=protected-access self.partitions_service = StaticPartitionService( [ UserPartition(0, "first_partition", "First Partition", [Group("0", "alpha"), Group("1", "beta")]), UserPartition( 1, "second_partition", "Second Partition", [Group("0", "abel"), Group("1", "baker"), Group("2", "charlie")], ), ], user_tags_service=self.tags_service, course_id=self.course.id, track_function=Mock(name="track_function"), ) self.module_system._services["partitions"] = self.partitions_service # pylint: disable=protected-access self.split_test_module = self.course_sequence.get_children()[0] self.split_test_module.bind_for_student( self.module_system, self.split_test_module._field_data ) # pylint: disable=protected-access
def create():
    """Method return Video Xmodule instance."""
    location = Location(["i4x", "edX", "video", "default", "SampleProblem1"])
    system = get_test_system()
    # Echo the context back so tests can inspect what would have been rendered.
    system.render_template = lambda template, context: context
    descriptor = Mock(weight="1", url_name="SampleProblem1")
    model_data = {'data': VideoFactory.sample_problem_xml_youtube, 'location': location}
    return VideoModule(system, descriptor, model_data)
def setUp(self): self.course = CourseFactory.create() # Turn off cache. modulestore().request_cache = None modulestore().metadata_inheritance_cache_subsystem = None chapter = ItemFactory.create( parent_location=self.course.location, category="sequential", ) section = ItemFactory.create( parent_location=chapter.location, category="sequential" ) # username = robot{0}, password = '******' self.users = [ UserFactory.create(username='******' % i, email='*****@*****.**' % i) for i in range(self.USER_COUNT) ] for user in self.users: CourseEnrollmentFactory.create(user=user, course_id=self.course.id) self.item_descriptor = ItemFactory.create( parent_location=section.location, category=self.CATEGORY, data=self.DATA ) system = get_test_system() system.render_template = lambda template, context: context model_data = {'location': self.item_descriptor.location} model_data.update(self.MODEL_DATA) self.item_module = self.item_descriptor.module_class( system, self.item_descriptor, model_data ) self.item_url = Location(self.item_module.location).url() # login all users for acces to Xmodule self.clients = {user.username: Client() for user in self.users} self.login_statuses = [ self.clients[user.username].login( username=user.username, password='******') for user in self.users ] self.assertTrue(all(self.login_statuses))
def setUp(self):
    """Instantiate the xmodule under test with a bare descriptor stand-in."""

    class EmptyClass:
        """Empty object."""
        pass

    self.system = get_test_system()
    self.descriptor = EmptyClass()
    self.xmodule_class = self.descriptor_class.module_class
    self.xmodule = self.xmodule_class(self.system, self.descriptor, self.raw_model_data)
def new_module_runtime(self):
    """
    Generate a new ModuleSystem that is minimally set up for testing
    """
    runtime = get_test_system(course_id=self.course.id)

    def get_module(descriptor):
        # When asked for a module out of a descriptor, just create a new
        # xmodule runtime and inject it into the descriptor.
        descriptor.xmodule_runtime = self.new_module_runtime()
        return descriptor

    runtime.get_module = get_module
    return runtime
def create():
    """Method return Video Xmodule instance."""
    system = get_test_system()
    # Echo the context back so tests can inspect what would have been rendered.
    system.render_template = lambda template, context: context
    location = Location(["i4x", "edX", "video", "default", "SampleProblem1"])
    model_data = {'data': VideoFactory.sample_problem_xml_youtube, 'location': location}
    descriptor = VideoDescriptor(system, model_data)
    return descriptor.xmodule(system)
def setUp(self):
    """Wire up a test runtime with a mocked xqueue and build the OpenEndedModule."""
    system = get_test_system()
    system.open_ended_grading_interface = None
    system.location = self.location

    xqueue_mock = MagicMock()
    xqueue_mock.send_to_queue.return_value = (None, "Message")

    def constructed_callback(dispatch="score_update"):
        return dispatch

    system.xqueue = {
        'interface': xqueue_mock,
        'construct_callback': constructed_callback,
        'default_queuename': 'testqueue',
        'waittime': 1,
    }
    self.test_system = system
    self.mock_xqueue = xqueue_mock
    self.openendedmodule = OpenEndedModule(
        system, self.location, self.definition, self.descriptor, self.static_data, self.metadata
    )
def setUp(self):
    """Build an XML course containing a split_test module, wire a static partition
    service, and bind the split_test module for a student."""
    super(SplitTestModuleTest, self).setUp()
    self.course_id = 'test_org/test_course_number/test_run'
    # construct module
    course = xml.CourseFactory.build()
    sequence = xml.SequenceFactory.build(parent=course)
    split_test = SplitTestModuleFactory(
        parent=sequence,
        attribs={
            'user_partition_id': '0',
            'group_id_to_child': '{"0": "i4x://edX/xml_test_course/html/split_test_cond0", "1": "i4x://edX/xml_test_course/html/split_test_cond1"}'
        }
    )
    xml.HtmlFactory(parent=split_test, url_name='split_test_cond0', text='HTML FOR GROUP 0')
    xml.HtmlFactory(parent=split_test, url_name='split_test_cond1', text='HTML FOR GROUP 1')

    self.course = self.process_xml(course)
    self.course_sequence = self.course.get_children()[0]
    self.module_system = get_test_system()

    def get_module(descriptor):
        """Mocks module_system get_module function"""
        module_system = get_test_system()
        module_system.get_module = get_module
        descriptor.bind_for_student(module_system, descriptor._field_data)  # pylint: disable=protected-access
        return descriptor

    self.module_system.get_module = get_module
    self.module_system.descriptor_system = self.course.runtime
    self.course.runtime.export_fs = MemoryFS()

    self.partitions_service = StaticPartitionService(
        [
            self.user_partition,
            UserPartition(
                1, 'second_partition', 'Second Partition',
                [Group("0", 'abel'), Group("1", 'baker'), Group("2", 'charlie')],
                MockUserPartitionScheme()
            )
        ],
        runtime=self.module_system,
        track_function=Mock(name='track_function'),
    )
    self.module_system._services['partitions'] = self.partitions_service  # pylint: disable=protected-access

    self.split_test_module = self.course_sequence.get_children()[0]
    self.split_test_module.bind_for_student(self.module_system, self.split_test_module._field_data)  # pylint: disable=protected-access
def test_course_error(self):
    """
    Ensure the view still returns results even if get_courses() returns an
    ErrorDescriptor. The ErrorDescriptor should be filtered out.
    """
    bad_course = ErrorDescriptor.from_xml(
        '<course></course>',
        get_test_system(),
        CourseLocationManager(CourseLocator(org='org', course='course', run='run')),
        None,
    )
    descriptors = [bad_course, self.empty_course, self.course]
    patched_get_courses = patch(
        'xmodule.modulestore.mixed.MixedModuleStore.get_courses',
        Mock(return_value=descriptors),
    )
    with patched_get_courses:
        self.test_get()
def setUp(self):
    """Build an XML course containing a split_test module, wire a static partition
    service with a mock user, and bind the split_test module for a student."""
    super(SplitTestModuleTest, self).setUp()
    self.course_id = "test_org/test_course_number/test_run"
    # construct module
    course = xml.CourseFactory.build()
    sequence = xml.SequenceFactory.build(parent=course)
    split_test = SplitTestModuleFactory(
        parent=sequence,
        attribs={
            "user_partition_id": "0",
            "group_id_to_child": '{"0": "i4x://edX/xml_test_course/html/split_test_cond0", "1": "i4x://edX/xml_test_course/html/split_test_cond1"}',
        },
    )
    xml.HtmlFactory(parent=split_test, url_name="split_test_cond0", text="HTML FOR GROUP 0")
    xml.HtmlFactory(parent=split_test, url_name="split_test_cond1", text="HTML FOR GROUP 1")

    self.course = self.process_xml(course)
    self.course_sequence = self.course.get_children()[0]
    self.module_system = get_test_system()
    self.module_system.descriptor_runtime = (
        self.course.runtime._descriptor_system
    )  # pylint: disable=protected-access
    self.course.runtime.export_fs = MemoryFS()

    self.partitions_service = StaticPartitionService(
        [
            self.user_partition,
            UserPartition(
                1,
                "second_partition",
                "Second Partition",
                [Group("0", "abel"), Group("1", "baker"), Group("2", "charlie")],
                MockUserPartitionScheme(),
            ),
        ],
        user=Mock(username="******", email="*****@*****.**", is_staff=False, is_active=True),
        course_id=self.course.id,
        track_function=Mock(name="track_function"),
    )
    self.module_system._services["partitions"] = self.partitions_service  # pylint: disable=protected-access

    self.split_test_module = self.course_sequence.get_children()[0]
    self.split_test_module.bind_for_student(
        self.module_system, self.split_test_module._field_data
    )  # pylint: disable=protected-access
def _bind_course_module(self, module):
    """
    Bind a module (part of self.course) so we can access student-specific data.
    """
    module_system = get_test_system(course_id=module.location.course_key)
    module_system.descriptor_runtime = module.runtime._descriptor_system  # pylint: disable=protected-access
    module_system._services['library_tools'] = self.tools  # pylint: disable=protected-access

    def get_module(descriptor):
        """Mocks module_system get_module function"""
        sub_module_system = get_test_system(course_id=module.location.course_key)
        sub_module_system.get_module = get_module
        sub_module_system.descriptor_runtime = descriptor._runtime  # pylint: disable=protected-access
        descriptor.bind_for_student(sub_module_system, self.user_id)
        return descriptor

    module_system.get_module = get_module
    module.xmodule_runtime = module_system
def setUp(self):
    """Build an XML course with a conditional module and bind it for a mock student."""
    super(ConditionalModuleStudioTest, self).setUp()
    course = xml.CourseFactory.build()
    sequence = xml.SequenceFactory.build(parent=course)
    conditional = ConditionalModuleFactory(
        parent=sequence,
        attribs={
            'group_id_to_child': '{"0": "i4x://edX/xml_test_course/html/conditional_0"}'
        }
    )
    xml.HtmlFactory(parent=conditional, url_name='conditional_0', text='This is a secret HTML')

    self.course = self.process_xml(course)
    self.sequence = self.course.get_children()[0]
    self.conditional = self.sequence.get_children()[0]

    self.module_system = get_test_system()
    self.module_system.descriptor_runtime = self.course._runtime  # pylint: disable=protected-access
    user = Mock(username='******', email='*****@*****.**', is_staff=False, is_active=True)
    self.conditional.bind_for_student(
        self.module_system,
        user.id
    )
def setUp(self): super(BaseVerticalModuleTest, self).setUp() # construct module course = xml.CourseFactory.build() sequence = xml.SequenceFactory.build(parent=course) vertical = xml.VerticalFactory.build(parent=sequence) self.course = self.process_xml(course) xml.HtmlFactory(parent=vertical, url_name='test-html-1', text=self.test_html_1) xml.HtmlFactory(parent=vertical, url_name='test-html-2', text=self.test_html_2) self.course = self.process_xml(course) course_seq = self.course.get_children()[0] self.module_system = get_test_system() self.module_system.descriptor_runtime = self.course._runtime # pylint: disable=protected-access self.course.runtime.export_fs = MemoryFS() self.vertical = course_seq.get_children()[0] self.vertical.xmodule_runtime = self.module_system
class CombinedOpenEndedModuleTest(unittest.TestCase):
    """
    Unit tests for the combined open ended xmodule
    """
    # Fixed location used for every module instance built by these tests.
    location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion"])
    # Skeleton XML for the combined module; filled in with rubric/prompt/tasks below.
    definition_template = """
                    <combinedopenended attempts="10000">
                    {rubric}
                    {prompt}
                    <task>
                    {task1}
                    </task>
                    <task>
                    {task2}
                    </task>
                    </combinedopenended>
                """
    prompt = "<prompt>This is a question prompt</prompt>"
    rubric = '''<rubric><rubric>
            <category>
            <description>Response Quality</description>
            <option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
            <option>Second option</option>
            </category>
         </rubric></rubric>'''
    max_score = 1

    metadata = {'attempts': '10', 'max_score': max_score}

    # Static configuration handed to module constructors in these tests.
    static_data = {
        'max_attempts': 20,
        'prompt': prompt,
        'rubric': rubric,
        'max_score': max_score,
        'display_name': 'Name',
        'accept_file_upload': False,
        'close_date': "",
        's3_interface': test_util_open_ended.S3_INTERFACE,
        'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
        'skip_basic_checks': False,
        'graded': True,
    }

    oeparam = etree.XML('''
      <openendedparam>
            <initial_display>Enter essay here.</initial_display>
            <answer_display>This is the answer.</answer_display>
            <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
        </openendedparam>
    ''')

    # Task 1: self-assessment; Task 2: open-ended (ML-graded) question.
    task_xml1 = '''
                <selfassessment>
                    <hintprompt>
                        What hint about this problem would you give to someone?
                    </hintprompt>
                    <submitmessage>
                        Save Succcesful. Thanks for participating!
                    </submitmessage>
                </selfassessment>
            '''
    task_xml2 = '''
    <openended min_score_to_attempt="1" max_score_to_attempt="1">
            <openendedparam>
                    <initial_display>Enter essay here.</initial_display>
                    <answer_display>This is the answer.</answer_display>
                    <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
           </openendedparam>
    </openended>'''
    definition = {
        'prompt': etree.XML(prompt),
        'rubric': etree.XML(rubric),
        'task_xml': [task_xml1, task_xml2]
    }
    full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
    descriptor = Mock(data=full_definition)
    # Shared, class-level container module used by the test_container_* tests.
    test_system = get_test_system()
    combinedoe_container = CombinedOpenEndedModule(
        test_system,
        descriptor,
        model_data={
            'data': full_definition,
            'weight': '1',
            'location': location
        })

    def setUp(self):
        # TODO: this constructor call is definitely wrong, but neither branch
        # of the merge matches the module constructor. Someone (Vik?) should fix this.
        self.combinedoe = CombinedOpenEndedV1Module(
            self.test_system,
            self.location,
            self.definition,
            self.descriptor,
            static_data=self.static_data,
            metadata=self.metadata,
            instance_state=self.static_data)

    def test_get_tag_name(self):
        """Tag name should be parsed out of an XML fragment."""
        name = self.combinedoe.get_tag_name("<t>Tag</t>")
        self.assertEqual(name, "t")

    def test_get_last_response(self):
        """First task of a fresh module is the (unstarted) self-assessment."""
        response_dict = self.combinedoe.get_last_response(0)
        self.assertEqual(response_dict['type'], "selfassessment")
        self.assertEqual(response_dict['max_score'], self.max_score)
        self.assertEqual(response_dict['state'], CombinedOpenEndedV1Module.INITIAL)

    def test_update_task_states(self):
        """update_task_states reports a change only after a task transitions to DONE."""
        changed = self.combinedoe.update_task_states()
        self.assertFalse(changed)
        current_task = self.combinedoe.current_task
        current_task.change_state(CombinedOpenEndedV1Module.DONE)
        changed = self.combinedoe.update_task_states()
        self.assertTrue(changed)

    def test_get_max_score(self):
        """A finished, scored module reports the configured max score."""
        self.combinedoe.update_task_states()
        self.combinedoe.state = "done"
        self.combinedoe.is_scored = True
        max_score = self.combinedoe.max_score()
        self.assertEqual(max_score, 1)

    def test_container_get_max_score(self):
        # The progress view requires that this function be exposed
        max_score = self.combinedoe_container.max_score()
        self.assertEqual(max_score, None)

    def test_container_weight(self):
        """Container exposes the weight from its model data."""
        weight = self.combinedoe_container.weight
        self.assertEqual(weight, 1)

    def test_container_child_weight(self):
        """The wrapped child module shares the container's weight."""
        weight = self.combinedoe_container.child_module.weight
        self.assertEqual(weight, 1)

    def test_get_score(self):
        """A fresh module has score 0 out of the configured total."""
        score_dict = self.combinedoe.get_score()
        self.assertEqual(score_dict['score'], 0)
        self.assertEqual(score_dict['total'], 1)

    def test_alternate_orderings(self):
        """The module should construct cleanly for any ordering/multiplicity of tasks."""
        t1 = self.task_xml1
        t2 = self.task_xml2
        xml_to_test = [[t1], [t2], [t1, t1], [t1, t2], [t2, t2], [t2, t1], [t1, t2, t1]]
        for xml in xml_to_test:
            definition = {
                'prompt': etree.XML(self.prompt),
                'rubric': etree.XML(self.rubric),
                'task_xml': xml
            }
            descriptor = Mock(data=definition)
            combinedoe = CombinedOpenEndedV1Module(
                self.test_system,
                self.location,
                definition,
                descriptor,
                static_data=self.static_data,
                metadata=self.metadata,
                instance_state=self.static_data)
            changed = combinedoe.update_task_states()
            self.assertFalse(changed)

    def test_get_score_realistic(self):
        """Rehydrate a realistic production instance state and check the weighted score (3/3 * weight 5 = 15)."""
        # Serialized instance state captured from a real ML-graded run: two task
        # states, each a JSON string with nested (escaped) grader feedback.
        instance_state = r"""{"ready_to_reset": false, "skip_spelling_checks": true, "current_task_number": 1, "weight": 5.0, "graceperiod": "1 day 12 hours 59 minutes 59 seconds", "graded": "True", "task_states": ["{\"child_created\": false, \"child_attempts\": 4, \"version\": 1, \"child_history\": [{\"answer\": \"The students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the group\\u2019s procedure, describe what additional information you would need in order to replicate the expe\", \"post_assessment\": \"{\\\"submission_id\\\": 3097, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: More grammar errors than average.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"the students data are recorded in the <bg>table below . starting mass</bg> g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . after reading the groups <bg>procedure , describe what additional</bg> information you would need in order to replicate the <bs>expe</bs>\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3233, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}, {\"answer\": \"After 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"To replicate the experiment, the procedure would require more detail. One piece of information that is omitted is the amount of vinegar used in the experiment. It is also important to know what temperature the experiment was kept at during the 24 hours. Finally, the procedure needs to include details about the experiment, for example if the whole sample must be submerged.\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"e the mass of four different samples.\\r\\nPour vinegar in each of four separate, but identical, containers.\\r\\nPlace a sample of one material into one container and label. Repeat with remaining samples, placing a single sample into a single container.\\r\\nAfter 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\", \"post_assessment\": \"[3]\", \"score\": 3}, {\"answer\": \"\", \"post_assessment\": \"[3]\", \"score\": 3}], \"max_score\": 3, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"The students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the group\\u2019s procedure, describe what additional information you would need in order to replicate the expe\", \"post_assessment\": \"{\\\"submission_id\\\": 3097, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: More grammar errors than average.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"the students data are recorded in the <bg>table below . starting mass</bg> g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . after reading the groups <bg>procedure , describe what additional</bg> information you would need in order to replicate the <bs>expe</bs>\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3233, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}, {\"answer\": \"After 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\\r\\nStarting Mass (g)\\tEnding Mass (g)\\tDifference in Mass (g)\\r\\nMarble\\t 9.8\\t 9.4\\t\\u20130.4\\r\\nLimestone\\t10.4\\t 9.1\\t\\u20131.3\\r\\nWood\\t11.2\\t11.2\\t 0.0\\r\\nPlastic\\t 7.2\\t 7.1\\t\\u20130.1\\r\\nAfter reading the\", \"post_assessment\": \"{\\\"submission_id\\\": 3098, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"after hours , remove the samples from the containers and rinse each sample with distilled water . allow the samples to sit and dry for minutes . determine the mass of each sample . the students data are recorded in the <bg>table below . starting mass</bg> g ending mass g difference in mass g marble . . . limestone . . . wood . . . plastic . . . after reading the\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3235, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}, {\"answer\": \"To replicate the experiment, the procedure would require more detail. One piece of information that is omitted is the amount of vinegar used in the experiment. It is also important to know what temperature the experiment was kept at during the 24 hours. Finally, the procedure needs to include details about the experiment, for example if the whole sample must be submerged.\", \"post_assessment\": \"{\\\"submission_id\\\": 3099, \\\"score\\\": 3, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"to replicate the experiment , the procedure would require <bg>more detail . one</bg> piece of information <bg>that is omitted is the</bg> amount of vinegar used in the experiment . it is also important to know what temperature the experiment was kept at during the hours . finally , the procedure needs to include details about the experiment , for example if the whole sample must be submerged .\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3237, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>3</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 3}, {\"answer\": \"e the mass of four different samples.\\r\\nPour vinegar in each of four separate, but identical, containers.\\r\\nPlace a sample of one material into one container and label. Repeat with remaining samples, placing a single sample into a single container.\\r\\nAfter 24 hours, remove the samples from the containers and rinse each sample with distilled water.\\r\\nAllow the samples to sit and dry for 30 minutes.\\r\\nDetermine the mass of each sample.\\r\\nThe students\\u2019 data are recorded in the table below.\\r\\n\", \"post_assessment\": \"{\\\"submission_id\\\": 3100, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"e the mass of four different samples . pour vinegar in <bg>each of four separate</bg> , but identical , containers . place a sample of one material into one container and label . repeat with remaining samples , placing a single sample into a single container . after hours , remove the samples from the containers and rinse each sample with distilled water . allow the samples to sit and dry for minutes . determine the mass of each sample . the students data are recorded in the table below . \\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3239, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}, {\"answer\": \"\", \"post_assessment\": \"{\\\"submission_id\\\": 3101, \\\"score\\\": 0, \\\"feedback\\\": \\\"{\\\\\\\"spelling\\\\\\\": \\\\\\\"Spelling: Ok.\\\\\\\", \\\\\\\"grammar\\\\\\\": \\\\\\\"Grammar: Ok.\\\\\\\", \\\\\\\"markup-text\\\\\\\": \\\\\\\"invalid essay .\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 3241, \\\"grader_type\\\": \\\"ML\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>Response Quality</description><score>0</score><option points='0'>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option><option points='1'>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option><option points='2'>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option><option points='3'>The response is correct, complete, and contains evidence of higher-order thinking.</option></category></rubric>\\\"}\", \"score\": 0}], \"max_score\": 3, \"child_state\": \"done\"}"], "attempts": "10000", "student_attempts": 0, "due": null, "state": "done", "accept_file_upload": false, "display_name": "Science Question -- Machine Assessed"}"""
        instance_state = json.loads(instance_state)
        rubric = """
        <rubric>
            <rubric>
                <category>
                    <description>Response Quality</description>
                    <option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
                    <option>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option>
                    <option>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option>
                    <option>The response is correct, complete, and contains evidence of higher-order thinking.</option>
                </category>
            </rubric>
        </rubric>
        """
        definition = {
            'prompt': etree.XML(self.prompt),
            'rubric': etree.XML(rubric),
            'task_xml': [self.task_xml1, self.task_xml2]
        }
        descriptor = Mock(data=definition)
        combinedoe = CombinedOpenEndedV1Module(
            self.test_system,
            self.location,
            definition,
            descriptor,
            static_data=self.static_data,
            metadata=self.metadata,
            instance_state=instance_state)
        score_dict = combinedoe.get_score()
        self.assertEqual(score_dict['score'], 15.0)
        self.assertEqual(score_dict['total'], 15.0)
def get_module(descriptor):
    """Mock get_module: bind the descriptor to a fresh test runtime and return it."""
    runtime = get_test_system()
    # Recursive hook so nested get_module calls resolve the same way.
    runtime.get_module = get_module
    descriptor.bind_for_student(runtime, descriptor._field_data)
    return descriptor
def get_module_system(self, descriptor):
    """Build a test runtime whose xqueue interface fakes grader submission."""
    runtime = get_test_system()
    runtime.open_ended_grading_interface = None
    # First call to send_to_queue yields 1, the second yields "queued".
    runtime.xqueue['interface'] = Mock(send_to_queue=Mock(side_effect=[1, "queued"]))
    return runtime
def new_module_runtime(self):
    """
    Generate a new ModuleSystem that is minimally set up for testing
    """
    runtime = get_test_system(course_id=self.course.id)
    return runtime
def setUp(self):
    """Build a fresh runtime and an OpenEndedChild from the class-level fixtures."""
    self.test_system = get_test_system()
    self.openendedchild = OpenEndedChild(
        self.test_system,
        self.location,
        self.definition,
        self.descriptor,
        self.static_data,
        self.metadata,
    )
def get_module_system(self, descriptor):
    """Return a course-scoped test runtime with open-ended grading disabled."""
    runtime = get_test_system(self.course_id)
    runtime.open_ended_grading_interface = None
    return runtime
def setUp(self):
    """Build an XML-backed split_test course bound to a mocked student runtime."""
    super(SplitTestModuleTest, self).setUp()
    self.course_id = 'test_org/test_course_number/test_run'
    # construct module: course -> sequence -> split_test with two html children
    course = xml.CourseFactory.build()
    sequence = xml.SequenceFactory.build(parent=course)
    split_test = SplitTestModuleFactory(
        parent=sequence,
        attribs={
            'user_partition_id': '0',
            'group_id_to_child': '{"0": "i4x://edX/xml_test_course/html/split_test_cond0", "1": "i4x://edX/xml_test_course/html/split_test_cond1"}'  # pylint: disable=line-too-long
        }
    )
    xml.HtmlFactory(parent=split_test, url_name='split_test_cond0', text='HTML FOR GROUP 0')
    xml.HtmlFactory(parent=split_test, url_name='split_test_cond1', text='HTML FOR GROUP 1')

    self.course = self.process_xml(course)
    self.course_sequence = self.course.get_children()[0]
    self.module_system = get_test_system()

    self.module_system.descriptor_runtime = self.course._runtime  # pylint: disable=protected-access
    self.course.runtime.export_fs = MemoryFS()

    # Create mock partition service, as these tests are running with XML in-memory system.
    self.course.user_partitions = [
        self.user_partition,
        UserPartition(
            MINIMUM_STATIC_PARTITION_ID, 'second_partition', 'Second Partition',
            [
                Group(unicode(MINIMUM_STATIC_PARTITION_ID + 1), 'abel'),
                Group(unicode(MINIMUM_STATIC_PARTITION_ID + 2), 'baker'),
                Group("103", 'charlie')
            ],
            MockUserPartitionScheme()
        )
    ]
    partitions_service = MockPartitionService(
        self.course,
        course_id=self.course.id,
        track_function=Mock(name='track_function'),
    )
    self.module_system._services['partitions'] = partitions_service  # pylint: disable=protected-access

    # Mock user_service user
    user_service = Mock()
    user = Mock(username='******', email='*****@*****.**', is_staff=False, is_active=True)
    user_service._django_user = user
    self.module_system._services['user'] = user_service  # pylint: disable=protected-access

    self.split_test_module = self.course_sequence.get_children()[0]
    self.split_test_module.bind_for_student(self.module_system, user.id)

    # Create mock modulestore for getting the course. Needed for rendering the HTML
    # view, since mock services exist and the rendering code will not short-circuit.
    mocked_modulestore = Mock()
    mocked_modulestore.get_course.return_value = self.course
    self.split_test_module.system.modulestore = mocked_modulestore
def setUp(self):
    """Runtime with open-ended grading disabled, plus a bare OpenEndedChild."""
    self.test_system = get_test_system()
    self.test_system.open_ended_grading_interface = None
    self.openendedchild = OpenEndedChild(
        self.test_system,
        self.location,
        self.definition,
        self.descriptor,
        self.static_data,
        self.metadata,
    )
def create(system, source_is_error_module=False):
    """
    return a dict of modules: the conditional with a single source and a single child.
    Keys are 'cond_module', 'source_module', and 'child_module'.

    if the source_is_error_module flag is set, create a real ErrorModule for the source.
    """
    descriptor_system = get_test_descriptor_system()

    # construct source descriptor and module:
    source_location = Location("edX", "conditional_test", "test_run", "problem", "SampleProblem", None)
    if source_is_error_module:
        # Make an error descriptor and module
        source_descriptor = NonStaffErrorDescriptor.from_xml(
            'some random xml data', system,
            id_generator=CourseLocationManager(source_location.course_key),
            error_msg='random error message'
        )
    else:
        source_descriptor = Mock(name='source_descriptor')
        source_descriptor.location = source_location

    source_descriptor.runtime = descriptor_system
    # Render through the descriptor system so the test runtime controls output.
    source_descriptor.render = lambda view, context=None: descriptor_system.render(source_descriptor, view, context)

    # construct other descriptors: a mocked html child whose student view
    # returns the "secret" content the conditional may reveal.
    child_descriptor = Mock(name='child_descriptor')
    child_descriptor._xmodule.student_view.return_value.content = u'<p>This is a secret</p>'
    child_descriptor.student_view = child_descriptor._xmodule.student_view
    child_descriptor.displayable_items.return_value = [child_descriptor]
    child_descriptor.runtime = descriptor_system
    child_descriptor.xmodule_runtime = get_test_system()
    child_descriptor.render = lambda view, context=None: descriptor_system.render(child_descriptor, view, context)
    child_descriptor.location = source_location.replace(category='html', name='child')

    # load_item resolves either location to its descriptor; anything else -> None.
    descriptor_system.load_item = {
        child_descriptor.location: child_descriptor,
        source_location: source_descriptor
    }.get

    system.descriptor_runtime = descriptor_system

    # construct conditional module:
    cond_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
    field_data = DictFieldData({
        'data': '<conditional/>',
        'xml_attributes': {'attempted': 'true'},
        'children': [child_descriptor.location],
    })

    cond_descriptor = ConditionalDescriptor(
        descriptor_system,
        field_data,
        ScopeIds(None, None, cond_location, cond_location)
    )
    cond_descriptor.xmodule_runtime = system
    # Identity get_module: descriptors double as their own modules in this test.
    system.get_module = lambda desc: desc
    cond_descriptor.get_required_module_descriptors = Mock(return_value=[source_descriptor])

    # return dict:
    return {
        'cond_module': cond_descriptor,
        'source_module': source_descriptor,
        'child_module': child_descriptor
    }
def setUp(self):
    """Each test gets a fresh test runtime."""
    super(ConditionalBlockXmlTest, self).setUp()  # lint-amnesty, pylint: disable=super-with-arguments
    self.test_system = get_test_system()
def setUp(self):
    """Provide a fresh test runtime for each test."""
    super().setUp()
    self.test_system = get_test_system()
def get_module(descriptor):
    """Mocks module_system get_module function"""
    runtime = get_test_system()
    # Recursive hook so nested lookups bind through this same helper.
    runtime.get_module = get_module
    descriptor.bind_for_student(runtime, descriptor._field_data)  # pylint: disable=protected-access
    return descriptor
def setUp(self):
    """Each test gets a fresh test runtime."""
    super(ConditionalBlockBasicTest, self).setUp()
    self.test_system = get_test_system()
def setUp(self):
    """Wire a runtime with a stubbed xqueue, then load the course fixture."""
    self.test_system = get_test_system()
    self.test_system.open_ended_grading_interface = None
    # send_to_queue yields 1 on the first call and "queued" on the second.
    self.test_system.xqueue['interface'] = Mock(send_to_queue=Mock(side_effect=[1, "queued"]))
    self.setup_modulestore(COURSE)
def setUp(self):
    """Each test runs against its own freshly built runtime."""
    self.test_system = get_test_system()
class CombinedOpenEndedModuleTest(unittest.TestCase):
    """
    Unit tests for the combined open ended xmodule
    """
    # Location of the sample problem used by every test in this class.
    location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion"])
    # Template for building a two-task combined open ended problem definition.
    definition_template = """
        <combinedopenended attempts="10000">
            {rubric}
            {prompt}
            <task>
                {task1}
            </task>
            <task>
                {task2}
            </task>
        </combinedopenended>
    """
    prompt = "<prompt>This is a question prompt</prompt>"
    rubric = '''<rubric><rubric>
        <category>
            <description>Response Quality</description>
            <option>The response is not a satisfactory answer to the question.  It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
            <option>Second option</option>
        </category>
    </rubric></rubric>'''
    max_score = 1

    metadata = {'attempts': '10', 'max_score': max_score}

    # Shared static configuration passed to each CombinedOpenEndedV1Module.
    static_data = {
        'max_attempts': 20,
        'prompt': prompt,
        'rubric': rubric,
        'max_score': max_score,
        'display_name': 'Name',
        'accept_file_upload': False,
        'close_date': "",
        's3_interface': test_util_open_ended.S3_INTERFACE,
        'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
        'skip_basic_checks': False,
        'graded': True,
    }

    oeparam = etree.XML('''
      <openendedparam>
            <initial_display>Enter essay here.</initial_display>
            <answer_display>This is the answer.</answer_display>
            <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
        </openendedparam>
    ''')

    task_xml1 = '''
        <selfassessment>
            <hintprompt>
                What hint about this problem would you give to someone?
            </hintprompt>
            <submitmessage>
                Save Succcesful.  Thanks for participating!
            </submitmessage>
        </selfassessment>
    '''
    task_xml2 = '''
    <openended min_score_to_attempt="1" max_score_to_attempt="1">
            <openendedparam>
                    <initial_display>Enter essay here.</initial_display>
                    <answer_display>This is the answer.</answer_display>
                    <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
           </openendedparam>
    </openended>'''
    definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
    full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
    descriptor = Mock(data=full_definition)
    # NOTE(review): this runtime and container are built at class-definition
    # time and therefore shared by all tests in this class.
    test_system = get_test_system()
    test_system.open_ended_grading_interface = None
    combinedoe_container = CombinedOpenEndedModule(
        descriptor=descriptor,
        runtime=test_system,
        field_data=DictFieldData({
            'data': full_definition,
            'weight': '1',
        }),
        scope_ids=ScopeIds(None, None, None, None),
    )

    def setUp(self):
        """Build a fresh CombinedOpenEndedV1Module for each test."""
        self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
                                                    self.location,
                                                    self.definition,
                                                    self.descriptor,
                                                    static_data=self.static_data,
                                                    metadata=self.metadata,
                                                    instance_state=self.static_data)

    def test_get_tag_name(self):
        """
        Test to see if the xml tag name is correct
        """
        name = self.combinedoe.get_tag_name("<t>Tag</t>")
        self.assertEqual(name, "t")

    def test_get_last_response(self):
        """
        See if we can parse the last response
        """
        response_dict = self.combinedoe.get_last_response(0)
        self.assertEqual(response_dict['type'], "selfassessment")
        self.assertEqual(response_dict['max_score'], self.max_score)
        self.assertEqual(response_dict['state'], CombinedOpenEndedV1Module.INITIAL)

    def test_update_task_states(self):
        """
        See if we can update the task states properly
        """
        changed = self.combinedoe.update_task_states()
        self.assertFalse(changed)

        # Forcing the current task to DONE should make update_task_states
        # report a change.
        current_task = self.combinedoe.current_task
        current_task.change_state(CombinedOpenEndedV1Module.DONE)
        changed = self.combinedoe.update_task_states()
        self.assertTrue(changed)

    def test_get_max_score(self):
        """
        Try to get the max score of the problem
        """
        self.combinedoe.update_task_states()
        self.combinedoe.state = "done"
        self.combinedoe.is_scored = True
        max_score = self.combinedoe.max_score()
        self.assertEqual(max_score, 1)

    def test_container_get_max_score(self):
        """
        See if we can get the max score from the actual xmodule
        """
        # The progress view requires that this function be exposed
        max_score = self.combinedoe_container.max_score()
        self.assertEqual(max_score, None)

    def test_container_get_progress(self):
        """
        See if we can get the progress from the actual xmodule
        """
        # NOTE(review): this calls max_score(), not get_progress() — likely a
        # copy/paste slip from the test above; confirm intent.
        progress = self.combinedoe_container.max_score()
        self.assertEqual(progress, None)

    def test_get_progress(self):
        """
        Test if we can get the correct progress from the combined open ended class
        """
        self.combinedoe.update_task_states()
        self.combinedoe.state = "done"
        self.combinedoe.is_scored = True
        progress = self.combinedoe.get_progress()
        self.assertIsInstance(progress, Progress)

        # progress._a is the score of the xmodule, which is 0 right now.
        self.assertEqual(progress._a, 0)

        # progress._b is the max_score (which is 1), divided by the weight (which is 1).
        self.assertEqual(progress._b, 1)

    def test_container_weight(self):
        """
        Check the problem weight in the container
        """
        weight = self.combinedoe_container.weight
        self.assertEqual(weight, 1)

    def test_container_child_weight(self):
        """
        Test the class to see if it picks up the right weight
        """
        weight = self.combinedoe_container.child_module.weight
        self.assertEqual(weight, 1)

    def test_get_score(self):
        """
        See if scoring works
        """
        score_dict = self.combinedoe.get_score()
        self.assertEqual(score_dict['score'], 0)
        self.assertEqual(score_dict['total'], 1)

    def test_alternate_orderings(self):
        """
        Try multiple ordering of definitions to see if the problem renders different steps correctly.
        """
        t1 = self.task_xml1
        t2 = self.task_xml2
        xml_to_test = [[t1], [t2], [t1, t1], [t1, t2], [t2, t2], [t2, t1], [t1, t2, t1]]
        for xml in xml_to_test:
            definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(self.rubric), 'task_xml': xml}
            descriptor = Mock(data=definition)
            combinedoe = CombinedOpenEndedV1Module(self.test_system,
                                                   self.location,
                                                   definition,
                                                   descriptor,
                                                   static_data=self.static_data,
                                                   metadata=self.metadata,
                                                   instance_state=self.static_data)

            changed = combinedoe.update_task_states()
            self.assertFalse(changed)

            # Rebuild with saved task states to confirm construction succeeds
            # for both self-assessment states.
            combinedoe = CombinedOpenEndedV1Module(self.test_system,
                                                   self.location,
                                                   definition,
                                                   descriptor,
                                                   static_data=self.static_data,
                                                   metadata=self.metadata,
                                                   instance_state={'task_states': TEST_STATE_SA})

            combinedoe = CombinedOpenEndedV1Module(self.test_system,
                                                   self.location,
                                                   definition,
                                                   descriptor,
                                                   static_data=self.static_data,
                                                   metadata=self.metadata,
                                                   instance_state={'task_states': TEST_STATE_SA_IN})

    def test_get_score_realistic(self):
        """
        Try to parse the correct score from a json instance state
        """
        instance_state = json.loads(MOCK_INSTANCE_STATE)
        rubric = """
        <rubric>
            <rubric>
                <category>
                    <description>Response Quality</description>
                    <option>The response is not a satisfactory answer to the question.  It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
                    <option>The response is a marginal answer to the question.  It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option>
                    <option>The response is a proficient answer to the question.  It is generally correct, although it may contain minor inaccuracies.  There is limited evidence of higher-order thinking.</option>
                    <option>The response is correct, complete, and contains evidence of higher-order thinking.</option>
                </category>
            </rubric>
        </rubric>
        """
        definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(rubric), 'task_xml': [self.task_xml1, self.task_xml2]}
        descriptor = Mock(data=definition)
        combinedoe = CombinedOpenEndedV1Module(self.test_system,
                                               self.location,
                                               definition,
                                               descriptor,
                                               static_data=self.static_data,
                                               metadata=self.metadata,
                                               instance_state=instance_state)
        score_dict = combinedoe.get_score()
        self.assertEqual(score_dict['score'], 15.0)
        self.assertEqual(score_dict['total'], 15.0)

    def generate_oe_module(self, task_state, task_number, task_xml):
        """
        Return a combined open ended module with the specified parameters
        """
        definition = {
            'prompt': etree.XML(self.prompt),
            'rubric': etree.XML(self.rubric),
            'task_xml': task_xml
        }
        descriptor = Mock(data=definition)
        instance_state = {'task_states': task_state, 'graded': True}
        if task_number is not None:
            instance_state.update({'current_task_number': task_number})
        combinedoe = CombinedOpenEndedV1Module(self.test_system,
                                               self.location,
                                               definition,
                                               descriptor,
                                               static_data=self.static_data,
                                               metadata=self.metadata,
                                               instance_state=instance_state)
        return combinedoe

    def ai_state_reset(self, task_state, task_number=None):
        """
        See if state is properly reset
        """
        combinedoe = self.generate_oe_module(task_state, task_number, [self.task_xml2])
        html = combinedoe.get_html()
        # NOTE(review): basestring is Python 2-only; this module predates the
        # Python 3 migration.
        self.assertIsInstance(html, basestring)
        score = combinedoe.get_score()
        if combinedoe.is_scored:
            self.assertEqual(score['score'], 0)
        else:
            self.assertEqual(score['score'], None)

    def ai_state_success(self, task_state, task_number=None, iscore=2, tasks=None):
        """
        See if state stays the same
        """
        if tasks is None:
            tasks = [self.task_xml1, self.task_xml2]
        combinedoe = self.generate_oe_module(task_state, task_number, tasks)
        html = combinedoe.get_html()
        self.assertIsInstance(html, basestring)
        score = combinedoe.get_score()
        self.assertEqual(int(score['score']), iscore)

    def test_ai_state_reset(self):
        self.ai_state_reset(TEST_STATE_AI)

    def test_ai_state2_reset(self):
        self.ai_state_reset(TEST_STATE_AI2)

    def test_ai_invalid_state(self):
        self.ai_state_reset(TEST_STATE_AI2_INVALID)

    def test_ai_state_rest_task_number(self):
        # Reset with various (including out-of-range) task numbers.
        self.ai_state_reset(TEST_STATE_AI, task_number=2)
        self.ai_state_reset(TEST_STATE_AI, task_number=5)
        self.ai_state_reset(TEST_STATE_AI, task_number=1)
        self.ai_state_reset(TEST_STATE_AI, task_number=0)

    def test_ai_state_success(self):
        self.ai_state_success(TEST_STATE_AI)

    def test_state_single(self):
        self.ai_state_success(TEST_STATE_SINGLE, iscore=12)

    def test_state_pe_single(self):
        self.ai_state_success(TEST_STATE_PE_SINGLE, iscore=0, tasks=[self.task_xml2])
def setUp(self):
    """Create a fresh test runtime before each test."""
    # Zero-argument super() — consistent with the Python 3 form used by
    # the other setUp methods in this file.
    super().setUp()
    self.test_system = get_test_system()
def test_student_view_init(self):
    """Binding a SequenceBlock for a student preserves the runtime's position."""
    runtime = get_test_system()
    runtime.position = 2
    block = SequenceBlock(runtime=runtime, scope_ids=Mock())
    block.bind_for_student(runtime, 34)
    assert block.position == 2
def create(system, source_is_error_module=False, source_visible_to_staff_only=False):
    """
    return a dict of modules: the conditional with a single source and a single child.
    Keys are 'cond_module', 'source_module', and 'child_module'.

    if the source_is_error_module flag is set, create a real ErrorModule for the source.
    """
    descriptor_system = get_test_descriptor_system()

    # construct source descriptor and module:
    source_location = BlockUsageLocator(CourseLocator("edX", "conditional_test", "test_run", deprecated=True),
                                        "problem", "SampleProblem", deprecated=True)
    if source_is_error_module:
        # Make an error descriptor and module
        source_descriptor = NonStaffErrorDescriptor.from_xml(
            'some random xml data',
            system,
            id_generator=CourseLocationManager(source_location.course_key),
            error_msg='random error message'
        )
    else:
        # Plain Mock stands in for the source descriptor.
        source_descriptor = Mock(name='source_descriptor')
        source_descriptor.location = source_location

    source_descriptor.visible_to_staff_only = source_visible_to_staff_only
    source_descriptor.runtime = descriptor_system
    source_descriptor.render = lambda view, context=None: descriptor_system.render(source_descriptor, view, context)

    # construct other descriptors:
    child_descriptor = Mock(name='child_descriptor')
    child_descriptor.visible_to_staff_only = False
    child_descriptor._xmodule.student_view.return_value = Fragment(content=u'<p>This is a secret</p>')
    child_descriptor.student_view = child_descriptor._xmodule.student_view
    child_descriptor.displayable_items.return_value = [child_descriptor]
    child_descriptor.runtime = descriptor_system
    child_descriptor.xmodule_runtime = get_test_system()
    child_descriptor.render = lambda view, context=None: descriptor_system.render(child_descriptor, view, context)
    child_descriptor.location = source_location.replace(category='html', name='child')

    def visible_to_nonstaff_users(desc):
        """
        Returns if the object is visible to nonstaff users.
        """
        return not desc.visible_to_staff_only

    def load_item(usage_id, for_parent=None):  # pylint: disable=unused-argument
        """Test-only implementation of load_item that simply returns static xblocks."""
        return {
            child_descriptor.location: child_descriptor,
            source_location: source_descriptor
        }.get(usage_id)

    descriptor_system.load_item = load_item

    system.descriptor_runtime = descriptor_system

    # construct conditional module:
    cond_location = BlockUsageLocator(CourseLocator("edX", "conditional_test", "test_run", deprecated=True),
                                      "conditional", "SampleConditional", deprecated=True)
    field_data = DictFieldData({
        'data': '<conditional/>',
        'conditional_attr': 'attempted',
        'conditional_value': 'true',
        'xml_attributes': {'attempted': 'true'},
        'children': [child_descriptor.location],
    })

    cond_descriptor = ConditionalDescriptor(
        descriptor_system,
        field_data,
        ScopeIds(None, None, cond_location, cond_location)
    )
    cond_descriptor.xmodule_runtime = system
    # get_module hides staff-only blocks from non-staff users by returning None.
    system.get_module = lambda desc: desc if visible_to_nonstaff_users(desc) else None
    cond_descriptor.get_required_module_descriptors = Mock(return_value=[source_descriptor])
    cond_descriptor.required_modules = [
        system.get_module(descriptor)
        for descriptor in cond_descriptor.get_required_module_descriptors()
    ]

    # return dict:
    return {
        'cond_module': cond_descriptor,
        'source_module': source_descriptor,
        'child_module': child_descriptor
    }
def setUp(self):
    """Set up a runtime, sample XML fixtures, and a course/usage key pair."""
    self.system = get_test_system()
    self.valid_xml = u"<problem>ABC \N{SNOWMAN}</problem>"
    self.error_msg = "Error"
    self.course_id = SlashSeparatedCourseKey('org', 'course', 'run')
    self.location = self.course_id.make_usage_key('foo', 'bar')
def _build(cls, target_class, *args, **kwargs):  # pylint: disable=unused-argument
    """Factory hook (see :meth:`factory.Factory._build`): ignore *target_class* and return a test runtime."""
    return get_test_system(*args, **kwargs)
def leaf_module_runtime(self):
    """Return the runtime used for leaf modules in these tests."""
    return get_test_system()