def _parse(self, oeparam, prompt, rubric, system):
    '''
    Parse OpenEndedResponse XML:
        self.initial_display
        self.payload - dict containing keys --
        'grader' : path to grader settings file, 'problem_id' : id of the problem

        self.answer - What to display when show answer is clicked
    '''
    # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
    prompt_string = stringify_children(prompt)
    rubric_string = stringify_children(rubric)
    self.child_prompt = prompt_string
    self.child_rubric = rubric_string

    grader_payload = oeparam.find('grader_payload')
    grader_payload = grader_payload.text if grader_payload is not None else ''

    # Update grader payload with student id. If grader payload not json, error.
    try:
        parsed_grader_payload = json.loads(grader_payload)
        # NOTE: self.system.location is valid because the capa_module
        # __init__ adds it (easiest way to get problem location into
        # response types)
    except (TypeError, ValueError):
        # BUGFIX: the original `except TypeError, ValueError:` is Python 2
        # comma syntax that binds the exception to the name `ValueError`
        # and therefore never catches ValueError at all; a parenthesized
        # tuple catches both exception types (valid in Py2 and Py3).
        # This is a dev_facing_error
        log.exception(
            "Grader payload from external open ended grading server is not a json object! Object: {0}".format(
                grader_payload))
def _parse(self, oeparam, prompt, rubric, system):
    '''
    Parse OpenEndedResponse XML:
        self.initial_display
        self.payload - dict containing keys --
        'grader' : path to grader settings file, 'problem_id' : id of the problem

        self.answer - What to display when show answer is clicked
    '''
    # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
    prompt_string = stringify_children(prompt)
    rubric_string = stringify_children(rubric)
    self.child_prompt = prompt_string
    self.child_rubric = rubric_string

    grader_payload = oeparam.find('grader_payload')
    grader_payload = grader_payload.text if grader_payload is not None else ''

    # Update grader payload with student id. If grader payload not json, error.
    try:
        parsed_grader_payload = json.loads(grader_payload)
        # NOTE: self.system.location is valid because the capa_module
        # __init__ adds it (easiest way to get problem location into
        # response types)
    except (TypeError, ValueError):
        # This is a dev_facing_error
        log.exception(
            "Grader payload from external open ended grading server is not a json object! Object: {0}"
            .format(grader_payload))
        # BUGFIX: the original fell through after logging, so the
        # parsed_grader_payload.update(...) below raised a confusing
        # NameError (the variable was never bound).  Re-raise the original,
        # meaningful exception instead.
        raise

    self.initial_display = find_with_default(oeparam, 'initial_display', '')
    self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')

    # Enrich the payload with everything the external grader needs to
    # identify and grade this problem.
    parsed_grader_payload.update({
        'location': self.location_string,
        'course_id': system.course_id.to_deprecated_string(),
        'prompt': prompt_string,
        'rubric': rubric_string,
        'initial_display': self.initial_display,
        'answer': self.answer,
        'problem_id': self.display_name,
        'skip_basic_checks': self.skip_basic_checks,
        'control': json.dumps(self.control),
    })
    updated_grader_payload = json.dumps(parsed_grader_payload)

    self.payload = {'grader_payload': updated_grader_payload}
def setup_response(self, system, location, definition, descriptor):
    """
    Sets up the module

    @param system: Modulesystem
    @param location: location, to let the module know where it is.
    @param definition: XML definition of the module.
    @param descriptor: SelfAssessmentDescriptor
    @return: None
    """
    # Normalize both child fields from XML nodes into plain strings.
    for attr in ('child_prompt', 'child_rubric'):
        setattr(self, attr, stringify_children(getattr(self, attr)))
def test_stringify():
    """stringify_children should drop the root tag but keep the inner markup."""
    # NOTE: removed the unused local `shard = 1` -- as a function-local it
    # had no effect (test-sharding markers are class/module attributes).
    text = 'Hi <div x="foo">there <span>Bruce</span><b>!</b></div>'
    html = '''<html a="b" foo="bar">{0}</html>'''.format(text)
    xml = etree.fromstring(html)
    out = stringify_children(xml)
    assert_equals(out, text)
def get_rubric(self, get):
    """
    Gets the results of a given grader via ajax.

    Input: AJAX get dictionary
    Output: Dictionary to be rendered via ajax that contains the result html.
    """
    all_responses = []
    loop_up_to_task = self.current_task_number + 1
    for i in xrange(0, loop_up_to_task):
        all_responses.append(self.get_last_response(i))

    def _human_graded(key):
        # Collect `key` from each response that has data for it and whose
        # first grader type is a human grader.  (Factors out the three
        # copy-pasted comprehensions from the original.)
        return [
            response[key] for response in all_responses
            if len(response[key]) > 0 and response['grader_types'][0] in HUMAN_GRADER_TYPE.keys()
        ]

    rubric_scores = _human_graded('rubric_scores')
    grader_types = _human_graded('grader_types')
    feedback_items = _human_graded('feedback_items')
    rubric_html = self.rubric_renderer.render_combined_rubric(
        stringify_children(self.static_data['rubric']),
        rubric_scores, grader_types, feedback_items)
    # NOTE: removed `response_dict = all_responses[-1]` -- it was never used.
    context = {
        'results': rubric_html,
        'task_name': 'Scored Rubric',
        'class_name': 'combined-rubric-container'
    }
    html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
    return {'html': html, 'success': True}
def definition_from_xml(cls, xml_object, system):
    """
    Pull out the data into dictionary.

    Args:
        xml_object: xml from file.

    Returns:
        dict
    """
    # check for presense of required tags in xml
    def _require_exactly_one(parent, tags):
        # Each tag must occur exactly once under `parent`.
        for tag in tags:
            if len(parent.xpath(tag)) != 1:
                raise ValueError("Graphical Slider Tool definition must include \
                    exactly one '{0}' tag".format(tag))

    # Level 0 is validated first, so the level-1 lookup below is safe.
    _require_exactly_one(xml_object, ['render', 'configuration'])
    _require_exactly_one(xml_object.xpath('configuration')[0], ['functions'])

    # finished
    return {'data': stringify_children(xml_object)}, []
def test_stringify_again():
    # Regression test: stringified output used to repeat tail content.
    # NOTE(review): this first literal is immediately overwritten by the
    # second assignment below; it preserves the original full-size markup
    # that first exhibited the bug.
    html = r"""<html name="Voltage Source Answer" >A voltage source is non-linear!
  <div align="center">
    <img src="/static/images/circuits/voltage-source.png"/>
    \(V=V_C\)
  </div>
  But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
  which means linear except for an offset.
  </html>
"""
    html = """<html>A voltage source is non-linear!
  <div align="center">
  </div>
  But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
  which means linear except for an offset.
  </html>
"""
    xml = etree.fromstring(html)
    out = stringify_children(xml)
    print("output:")
    print(out)
    # Tracking strange content repeating bug
    # Should appear once
    assert out.count("But it is ") == 1
def test_stringify_again():
    # Regression test (Python 2 variant): stringified output used to repeat
    # tail content.
    # NOTE(review): this first literal is immediately overwritten by the
    # second assignment below; it preserves the original full-size markup
    # that first exhibited the bug.
    html = r"""<html name="Voltage Source Answer" >A voltage source is non-linear!
  <div align="center">
    <img src="/static/images/circuits/voltage-source.png"/>
    \(V=V_C\)
  </div>
  But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
  which means linear except for an offset.
  </html>
"""
    html = """<html>A voltage source is non-linear!
  <div align="center">
  </div>
  But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
  which means linear except for an offset.
  </html>
"""
    xml = etree.fromstring(html)
    out = stringify_children(xml)
    print "output:"
    print out
    # Tracking strange content repeating bug
    # Should appear once
    assert_equals(out.count("But it is "), 1)
def get_rubric(self, _data):
    """
    Gets the results of a given grader via ajax.

    Input: AJAX data dictionary
    Output: Dictionary to be rendered via ajax that contains the result html.
    """
    all_responses = []
    loop_up_to_task = self.current_task_number + 1
    for i in xrange(0, loop_up_to_task):
        all_responses.append(self.get_last_response(i))

    def _human_graded(key):
        # Collect `key` from each response that has data for it and whose
        # first grader type is a human grader.  (Factors out the three
        # copy-pasted comprehensions from the original.)
        return [
            response[key] for response in all_responses
            if len(response[key]) > 0 and response['grader_types'][0] in HUMAN_GRADER_TYPE.keys()
        ]

    rubric_scores = _human_graded('rubric_scores')
    grader_types = _human_graded('grader_types')
    feedback_items = _human_graded('feedback_items')
    rubric_html = self.rubric_renderer.render_combined_rubric(
        stringify_children(self.static_data['rubric']),
        rubric_scores, grader_types, feedback_items)
    # NOTE: removed `response_dict = all_responses[-1]` -- it was never used.
    context = {
        'results': rubric_html,
        'task_name': 'Scored Rubric',
        'class_name': 'combined-rubric-container'
    }
    html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
    return {'html': html, 'success': True}
def _parse(self, oeparam, prompt, rubric, system):
    '''
    Parse OpenEndedResponse XML:
        self.initial_display
        self.payload - dict containing keys --
        'grader' : path to grader settings file, 'problem_id' : id of the problem

        self.answer - What to display when show answer is clicked
    '''
    # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
    prompt_string = stringify_children(prompt)
    rubric_string = stringify_children(rubric)
    self.child_prompt = prompt_string
    self.child_rubric = rubric_string

    grader_payload = oeparam.find('grader_payload')
    grader_payload = grader_payload.text if grader_payload is not None else ''

    # Update grader payload with student id. If grader payload not json, error.
    try:
        parsed_grader_payload = json.loads(grader_payload)
        # NOTE: self.system.location is valid because the capa_module
        # __init__ adds it (easiest way to get problem location into
        # response types)
    except (TypeError, ValueError):
        # This is a dev_facing_error
        log.exception(
            "Grader payload from external open ended grading server is not a json object! Object: {0}".format(
                grader_payload))
        # BUGFIX: the original fell through after logging, so the
        # parsed_grader_payload.update(...) below raised a confusing
        # NameError (the variable was never bound).  Re-raise the original,
        # meaningful exception instead.
        raise

    self.initial_display = find_with_default(oeparam, 'initial_display', '')
    self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')

    # Enrich the payload with everything the external grader needs to
    # identify and grade this problem.
    parsed_grader_payload.update({
        'location': self.location_string,
        'course_id': system.course_id.to_deprecated_string(),
        'prompt': prompt_string,
        'rubric': rubric_string,
        'initial_display': self.initial_display,
        'answer': self.answer,
        'problem_id': self.display_name,
        'skip_basic_checks': self.skip_basic_checks,
        'control': json.dumps(self.control),
    })
    updated_grader_payload = json.dumps(parsed_grader_payload)

    self.payload = {'grader_payload': updated_grader_payload}
def get_rubric(self, _data):
    """
    Gets the results of a given grader via ajax.

    Input: AJAX data dictionary
    Output: Dictionary to be rendered via ajax that contains the result html.
    """
    # NOTE(review): `all_responses` is never used in this variant.
    all_responses = []
    # Students may only see the rubric after doing the required grading of
    # other submissions.
    success, can_see_rubric, error = self.check_if_student_has_done_needed_grading(
    )
    if not can_see_rubric:
        return {
            'html': self.system.render_template(
                '{0}/combined_open_ended_hidden_results.html'.format(
                    self.TEMPLATE_DIR), {'error': error}),
            'success': True,
            'hide_reset': True
        }
    contexts = []
    rubric_number = self.current_task_number
    # When the module is ready to reset, show the upcoming task's rubric.
    if self.ready_to_reset:
        rubric_number += 1
    response = self.get_last_response(rubric_number)
    score_length = len(response['grader_types'])
    for z in xrange(score_length):
        # Only human-graded entries carry a rubric worth rendering.
        if response['grader_types'][z] in HUMAN_GRADER_TYPE:
            try:
                feedback = response['feedback_dicts'][z].get(
                    'feedback', '')
            except TypeError:
                # feedback_dicts entry was not a dict -- presumably
                # malformed state; signal failure.  TODO confirm upstream.
                return {'success': False}
            # Renderer expects list-of-list shapes, one score per rubric.
            rubric_scores = [[response['rubric_scores'][z]]]
            grader_types = [[response['grader_types'][z]]]
            feedback_items = [[response['feedback_items'][z]]]
            rubric_html = self.rubric_renderer.render_combined_rubric(
                stringify_children(self.static_data['rubric']),
                rubric_scores, grader_types, feedback_items)
            contexts.append({
                'result': rubric_html,
                'task_name': 'Scored rubric',
                'feedback': feedback
            })

    context = {
        'results': contexts,
    }
    html = self.system.render_template(
        '{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR),
        context)
    return {'html': html, 'success': True, 'hide_reset': False}
def parse_xml_new_runtime(cls, node, runtime, keys):
    """
    Parse XML in the new blockstore-based runtime. Since it doesn't yet
    support loading separate .html files, the HTML data is assumed to be
    in a CDATA child or otherwise just inline in the OLX.
    """
    block = runtime.construct_xblock_from_class(cls, keys)
    block.data = stringify_children(node)
    # Every XML attribute maps onto a same-named field, when one exists.
    for field_name, field_value in node.items():
        cls._set_field_if_present(block, field_name, field_value, {})
    return block
def definition_from_xml(cls, xml_object, system):
    """Pull out the data into dictionary.

    Args:
        xml_object: xml from file.
        system: `system` object.

    Returns:
        (definition, children) - tuple
        definition - dict:
            {
                'answers': <List of answers>,
                'question': <Question string>
            }
    """
    # Check for presense of required tags in xml.
    if len(xml_object.xpath(cls._child_tag_name)) == 0:
        raise ValueError("Poll_question definition must include \
            at least one 'answer' tag")

    # Work on a copy so answer nodes can be stripped out, leaving only the
    # question markup behind.
    xml_object_copy = deepcopy(xml_object)
    answers = []
    for answer_element in xml_object_copy.findall(cls._child_tag_name):
        identifier = answer_element.get('id', None)
        if identifier:
            answers.append({
                'id': identifier,
                'text': stringify_children(answer_element)
            })
        xml_object_copy.remove(answer_element)

    return (
        {
            'answers': answers,
            'question': stringify_children(xml_object_copy)
        },
        [],
    )
def get_results(self, get):
    """
    Gets the results of a given grader via ajax.

    Input: AJAX get dictionary
    Output: Dictionary to be rendered via ajax that contains the result html.
    """
    # Pull in any state changes from the grading backend before rendering.
    self.update_task_states()
    loop_up_to_task = self.current_task_number + 1
    all_responses = []
    for i in xrange(0, loop_up_to_task):
        all_responses.append(self.get_last_response(i))
    context_list = []
    # One table row per individual rubric score in every task's response.
    for ri in all_responses:
        for i in xrange(0, len(ri['rubric_scores'])):
            feedback = ri['feedback_dicts'][i].get('feedback', '')
            rubric_data = self.rubric_renderer.render_rubric(
                stringify_children(self.static_data['rubric']),
                ri['rubric_scores'][i])
            if rubric_data['success']:
                rubric_html = rubric_data['html']
            else:
                # Rendering failed; fall back to feedback without a rubric.
                rubric_html = ''
            context = {
                'rubric_html': rubric_html,
                'grader_type': ri['grader_type'],
                'feedback': feedback,
                'grader_id': ri['grader_ids'][i],
                'submission_id': ri['submission_ids'][i],
            }
            context_list.append(context)
    feedback_table = self.system.render_template(
        '{0}/open_ended_result_table.html'.format(self.TEMPLATE_DIR), {
            'context_list': context_list,
            'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
            'human_grader_types': HUMAN_GRADER_TYPE,
            'rows': 50,
            'cols': 50,
        })
    context = {
        'results': feedback_table,
        'task_name': "Feedback",
        'class_name': "result-container",
    }
    html = self.system.render_template(
        '{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR),
        context)
    return {'html': html, 'success': True}
def get_rubric(self, _data):
    """
    Gets the results of a given grader via ajax.

    Input: AJAX data dictionary
    Output: Dictionary to be rendered via ajax that contains the result html.
    """
    # NOTE(review): `all_responses` is never used in this variant.
    all_responses = []
    # Students may only see the rubric after doing the required grading of
    # other submissions.
    success, can_see_rubric, error = self.check_if_student_has_done_needed_grading()
    if not can_see_rubric:
        return {
            'html': self.system.render_template(
                '{0}/combined_open_ended_hidden_results.html'.format(self.TEMPLATE_DIR),
                {'error': error}),
            'success': True,
            'hide_reset': True
        }
    contexts = []
    rubric_number = self.current_task_number
    # When the module is ready to reset, show the upcoming task's rubric.
    if self.ready_to_reset:
        rubric_number+=1
    response = self.get_last_response(rubric_number)
    score_length = len(response['grader_types'])
    for z in xrange(score_length):
        # Only human-graded entries carry a rubric worth rendering.
        if response['grader_types'][z] in HUMAN_GRADER_TYPE:
            try:
                feedback = response['feedback_dicts'][z].get('feedback', '')
            except TypeError:
                # feedback_dicts entry was not a dict -- presumably
                # malformed state; signal failure.  TODO confirm upstream.
                return {'success' : False}
            # Renderer expects list-of-list shapes, one score per rubric.
            rubric_scores = [[response['rubric_scores'][z]]]
            grader_types = [[response['grader_types'][z]]]
            feedback_items = [[response['feedback_items'][z]]]
            rubric_html = self.rubric_renderer.render_combined_rubric(
                stringify_children(self.static_data['rubric']),
                rubric_scores, grader_types, feedback_items)
            contexts.append({
                'result': rubric_html,
                'task_name': 'Scored rubric',
                'feedback' : feedback
            })

    context = {
        'results': contexts,
    }
    html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
    return {'html': html, 'success': True, 'hide_reset' : False}
def get_results(self, get):
    """
    Gets the results of a given grader via ajax.

    Input: AJAX get dictionary
    Output: Dictionary to be rendered via ajax that contains the result html.
    """
    # Pull in any state changes from the grading backend before rendering.
    self.update_task_states()
    loop_up_to_task = self.current_task_number + 1
    all_responses = []
    for i in xrange(0, loop_up_to_task):
        all_responses.append(self.get_last_response(i))
    context_list = []
    # One table row per individual rubric score in every task's response.
    for ri in all_responses:
        for i in xrange(0, len(ri['rubric_scores'])):
            feedback = ri['feedback_dicts'][i].get('feedback', '')
            rubric_data = self.rubric_renderer.render_rubric(
                stringify_children(self.static_data['rubric']),
                ri['rubric_scores'][i])
            if rubric_data['success']:
                rubric_html = rubric_data['html']
            else:
                # Rendering failed; fall back to feedback without a rubric.
                rubric_html = ''
            context = {
                'rubric_html': rubric_html,
                'grader_type': ri['grader_type'],
                'feedback': feedback,
                'grader_id': ri['grader_ids'][i],
                'submission_id': ri['submission_ids'][i],
            }
            context_list.append(context)
    feedback_table = self.system.render_template('{0}/open_ended_result_table.html'.format(self.TEMPLATE_DIR), {
        'context_list': context_list,
        'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
        'human_grader_types': HUMAN_GRADER_TYPE,
        'rows': 50,
        'cols': 50,
    })
    context = {
        'results': feedback_table,
        'task_name': "Feedback",
        'class_name': "result-container",
    }
    html = self.system.render_template(
        '{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR),
        context)
    return {'html': html, 'success': True}
def get_results(self, get):
    """
    Gets the results of a given grader via ajax.

    Input: AJAX get dictionary
    Output: Dictionary to be rendered via ajax that contains the result html.
    """
    # Pull in any state changes from the grading backend before rendering.
    self.update_task_states()
    loop_up_to_task = self.current_task_number + 1
    all_responses = []
    for i in xrange(0, loop_up_to_task):
        all_responses.append(self.get_last_response(i))
    context_list = []
    # One table row per individual rubric score in every task's response.
    for ri in all_responses:
        for i in xrange(0, len(ri["rubric_scores"])):
            feedback = ri["feedback_dicts"][i].get("feedback", "")
            rubric_data = self.rubric_renderer.render_rubric(
                stringify_children(self.static_data["rubric"]), ri["rubric_scores"][i]
            )
            if rubric_data["success"]:
                rubric_html = rubric_data["html"]
            else:
                # Rendering failed; fall back to feedback without a rubric.
                rubric_html = ""
            context = {
                "rubric_html": rubric_html,
                "grader_type": ri["grader_type"],
                "feedback": feedback,
                "grader_id": ri["grader_ids"][i],
                "submission_id": ri["submission_ids"][i],
            }
            context_list.append(context)
    feedback_table = self.system.render_template(
        "{0}/open_ended_result_table.html".format(self.TEMPLATE_DIR),
        {
            "context_list": context_list,
            "grader_type_image_dict": GRADER_TYPE_IMAGE_DICT,
            "human_grader_types": HUMAN_GRADER_TYPE,
            "rows": 50,
            "cols": 50,
        },
    )
    context = {"results": feedback_table, "task_name": "Feedback", "class_name": "result-container"}
    html = self.system.render_template("{0}/combined_open_ended_results.html".format(self.TEMPLATE_DIR), context)
    return {"html": html, "success": True}
def get_rubric(self, _data):
    """
    Gets the results of a given grader via ajax.

    Input: AJAX data dictionary
    Output: Dictionary to be rendered via ajax that contains the result html.
    """
    # NOTE(review): `all_responses` is never used in this variant.
    all_responses = []
    # Students may only see the rubric after doing the required grading of
    # other submissions.
    success, can_see_rubric, error = self.check_if_student_has_done_needed_grading()
    if not can_see_rubric:
        return {
            "html": self.system.render_template(
                "{0}/combined_open_ended_hidden_results.html".format(self.TEMPLATE_DIR), {"error": error}
            ),
            "success": True,
            "hide_reset": True,
        }
    contexts = []
    rubric_number = self.current_task_number
    # When the module is ready to reset, show the upcoming task's rubric.
    if self.ready_to_reset:
        rubric_number += 1
    response = self.get_last_response(rubric_number)
    score_length = len(response["grader_types"])
    for z in xrange(score_length):
        # Only human-graded entries carry a rubric worth rendering.
        if response["grader_types"][z] in HUMAN_GRADER_TYPE:
            try:
                feedback = response["feedback_dicts"][z].get("feedback", "")
            except TypeError:
                # feedback_dicts entry was not a dict -- presumably
                # malformed state; signal failure.  TODO confirm upstream.
                return {"success": False}
            # Renderer expects list-of-list shapes, one score per rubric.
            rubric_scores = [[response["rubric_scores"][z]]]
            grader_types = [[response["grader_types"][z]]]
            feedback_items = [[response["feedback_items"][z]]]
            rubric_html = self.rubric_renderer.render_combined_rubric(
                stringify_children(self.static_data["rubric"]), rubric_scores, grader_types, feedback_items
            )
            contexts.append({"result": rubric_html, "task_name": "Scored rubric", "feedback": feedback})

    context = {"results": contexts}
    html = self.system.render_template("{0}/combined_open_ended_results.html".format(self.TEMPLATE_DIR), context)
    return {"html": html, "success": True, "hide_reset": False}
def assert_label(self, xpath=None, aria_label=False):
    """
    Verify label is rendered correctly.

    Arguments:
        xpath (str): xpath expression for label element
        aria_label (bool): check aria-label attribute value
    """
    # Each case pairs raw markup ('actual') with its plain-text rendering
    # ('expected') -- aria-label values are stripped of inline tags.
    labels = [
        {
            'actual': "You see, but you do not observe. The distinction is clear.",
            'expected': "You see, but you do not observe. The distinction is clear.",
        },
        {
            'actual': "I choose to have <mark>faith</mark> because without that, I have <em>nothing</em>.",
            'expected': "I choose to have faith because without that, I have nothing.",
        }
    ]
    response_data = {
        'response_data': {
            'descriptions': {},
            'label': ''
        }
    }
    self.context.update(response_data)

    for label in labels:
        self.context['response_data']['label'] = label['actual']
        xml = self.render_to_xml(self.context)

        if aria_label:
            # aria-label carries the stripped text.
            self.assert_has_xpath(xml, "//*[@aria-label='%s']" % label['expected'], self.context)
        else:
            # The label element keeps the raw inline markup.
            element_list = xml.xpath(xpath)
            self.assertEqual(len(element_list), 1)
            self.assertEqual(stringify_children(element_list[0]), label['actual'])
def assert_label(self, xpath=None, aria_label=False):
    """
    Verify label is rendered correctly.

    Arguments:
        xpath (str): xpath expression for label element
        aria_label (bool): check aria-label attribute value
    """
    # Each case pairs raw markup ('actual') with its plain-text rendering
    # ('expected') -- aria-label values are stripped of inline tags.
    labels = [
        {
            'actual': "You see, but you do not observe. The distinction is clear.",
            'expected': "You see, but you do not observe. The distinction is clear.",
        },
        {
            'actual': "I choose to have <mark>faith</mark> because without that, I have <em>nothing</em>.",
            'expected': "I choose to have faith because without that, I have nothing.",
        }
    ]
    self.context.update({
        'response_data': {
            'descriptions': {},
            'label': ''
        }
    })

    for label in labels:
        self.context['response_data']['label'] = label['actual']
        xml = self.render_to_xml(self.context)

        if aria_label:
            # aria-label carries the stripped text.
            self.assert_has_xpath(xml, "//*[@aria-label='%s']" % label['expected'], self.context)
            continue

        # The label element keeps the raw inline markup, and exactly one
        # element may match.
        matches = xml.xpath(xpath)
        assert len(matches) == 1
        assert stringify_children(matches[0]) == label['actual']
def assert_description(self, describedby_xpaths):
    """
    Verify that descriptions information is correct.

    Arguments:
        describedby_xpaths (list): list of xpaths to check aria-describedby attribute
    """
    xml = self.render_to_xml(self.context)

    # Each description <p> must carry the right id, text, and ordering.
    rendered = OrderedDict()
    for tag in xml.xpath('//p[@class="question-description"]'):
        rendered[tag.get('id')] = stringify_children(tag)
    self.assertEqual(self.DESCRIPTIONS, rendered)

    # Every aria-describedby attribute must resolve to the known ids.
    for describedby_xpath in describedby_xpaths:
        describedbys = xml.xpath(describedby_xpath)
        self.assertTrue(describedbys)
        for describedby in describedbys:
            self.assertEqual(describedby, self.DESCRIPTION_IDS)
def assert_description(self, describedby_xpaths):
    """
    Verify that descriptions information is correct.

    Arguments:
        describedby_xpaths (list): list of xpaths to check aria-describedby attribute
    """
    xml = self.render_to_xml(self.context)

    # Verify that each description <p> tag has correct id, text and order
    descriptions = OrderedDict(
        (tag.get('id'), stringify_children(tag)) for tag in xml.xpath('//p[@class="question-description"]'))
    self.assertEqual(self.DESCRIPTIONS, descriptions)

    # for each xpath verify that description_ids are set correctly
    for describedby_xpath in describedby_xpaths:
        describedbys = xml.xpath(describedby_xpath)

        # aria-describedby attributes must have ids
        self.assertTrue(describedbys)

        for describedby in describedbys:
            self.assertEqual(describedby, self.DESCRIPTION_IDS)
def load_definition(cls, xml_object, system, location, id_generator):
    '''Load a descriptor from the specified xml_object:

    If there is a filename attribute, load it as a string, and
    log a warning if it is not parseable by etree.HTMLParser.

    If there is not a filename attribute, the definition is the body
    of the xml_object, without the root tag (do not want <html> in the
    middle of a page)

    Args:
        xml_object: an lxml.etree._Element containing the definition to load
        system: the modulestore system or runtime which caches data
        location: the usage id for the block--used to compute the filename if none in the xml_object
        id_generator: used by other impls of this method to generate the usage_id
    '''
    filename = xml_object.get('filename')
    if filename is None:
        # Inline definition: everything inside the root tag is the data.
        definition_xml = copy.deepcopy(xml_object)
        cls.clean_metadata_from_xml(definition_xml)
        return {'data': stringify_children(definition_xml)}, []
    else:
        # html is special.  cls.filename_extension is 'xml', but
        # if 'filename' is in the definition, that means to load
        # from .html
        # 'filename' in html pointers is a relative path
        # (not same as 'html/blah.html' when the pointer is in a directory itself)
        pointer_path = "{category}/{url_path}".format(
            category='html',
            url_path=name_to_pathname(location.name)
        )
        base = path(pointer_path).dirname()
        # log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename))
        filepath = "{base}/{name}.html".format(base=base, name=filename)
        # log.debug("looking for html file for {0} at {1}".format(location, filepath))

        # VS[compat]
        # TODO (cpennington): If the file doesn't exist at the right path,
        # give the class a chance to fix it up. The file will be written out
        # again in the correct format.  This should go away once the CMS is
        # online and has imported all current (fall 2012) courses from xml
        if not system.resources_fs.exists(filepath):
            candidates = cls.backcompat_paths(filepath)
            # log.debug("candidates = {0}".format(candidates))
            for candidate in candidates:
                if system.resources_fs.exists(candidate):
                    filepath = candidate
                    break
        try:
            with system.resources_fs.open(filepath) as file:
                html = file.read().decode('utf-8')
                # Log a warning if we can't parse the file, but don't error
                if not check_html(html) and len(html) > 0:
                    msg = "Couldn't parse html in {0}, content = {1}".format(filepath, html)
                    log.warning(msg)
                    system.error_tracker("Warning: " + msg)
                definition = {'data': html}

                # TODO (ichuang): remove this after migration
                # for Fall 2012 LMS migration: keep filename (and unmangled filename)
                definition['filename'] = [filepath, filename]

                return definition, []
        except (ResourceNotFoundError) as err:
            msg = 'Unable to load file contents at path {0}: {1} '.format(
                filepath, err)
            # add more info and re-raise
            # NOTE: Python 2 three-expression raise, preserving the original
            # traceback while replacing the exception.
            raise Exception(msg), None, sys.exc_info()[2]
def test_stringify():
    # stringify_children should strip the outer tag and keep children intact.
    expected = 'Hi <div x="foo">there <span>Bruce</span><b>!</b></div>'
    markup = f'''<html a="b" foo="bar">{expected}</html>'''
    parsed = etree.fromstring(markup)
    assert stringify_children(parsed) == expected
def response_a11y_data(self, response, inputfields, responsetype_id, problem_data):
    """
    Construct data to be used for a11y.

    Arguments:
        response (object): xml response object
        inputfields (list): list of inputfields in a responsetype
        responsetype_id (str): responsetype id
        problem_data (dict): dict to be filled with response data
    """
    # if there are no inputtypes then don't do anything
    if not inputfields:
        return

    element_to_be_deleted = None
    label = ''

    if len(inputfields) > 1:
        # Multiple inputs: mark the response group and expose group-level
        # label/description ids for aria wiring.
        response.set('multiple_inputtypes', 'true')
        group_label_tag = response.find('label')
        group_description_tags = response.findall('description')
        group_label_tag_id = u'multiinput-group-label-{}'.format(responsetype_id)
        group_label_tag_text = ''
        if group_label_tag is not None:
            group_label_tag.tag = 'p'
            group_label_tag.set('id', group_label_tag_id)
            group_label_tag.set('class', 'multi-inputs-group-label')
            group_label_tag_text = stringify_children(group_label_tag)
            response.set('multiinput-group-label-id', group_label_tag_id)

        group_description_ids = []
        for index, group_description_tag in enumerate(group_description_tags):
            group_description_tag_id = u'multiinput-group-description-{}-{}'.format(responsetype_id, index)
            group_description_tag.tag = 'p'
            group_description_tag.set('id', group_description_tag_id)
            group_description_tag.set('class', 'multi-inputs-group-description question-description')
            group_description_ids.append(group_description_tag_id)

        if group_description_ids:
            response.set('multiinput-group_description_ids', ' '.join(group_description_ids))

        for inputfield in inputfields:
            problem_data.update({
                'group_label': group_label_tag_text,
                'title': inputfield.attrib.get('label', ''),
                'descriptions': {}
            })
    else:
        # Extract label value from <label> tag or label attribute from inside the responsetype
        responsetype_label_tag = response.find('label')
        if responsetype_label_tag is not None:
            label = stringify_children(responsetype_label_tag)
            # store <label> tag containing question text to delete
            # it later otherwise question will be rendered twice
            element_to_be_deleted = responsetype_label_tag
        elif 'label' in inputfields[0].attrib:
            # in this case we have old problems with label attribute and p tag having question in it
            # we will pick the first sibling of responsetype if its a p tag and match the text with
            # the label attribute text. if they are equal then we will use this text as question.
            # Get first <p> tag before responsetype, this <p> may contains the question text.
            p_tag = response.xpath('preceding-sibling::*[1][self::p]')
            if p_tag and p_tag[0].text == inputfields[0].attrib['label']:
                label = stringify_children(p_tag[0])
                element_to_be_deleted = p_tag[0]
        else:
            # In this case the problems don't have tag or label attribute inside the responsetype
            # so we will get the first preceding label tag w.r.t to this responsetype.
            # This will take care of those multi-question problems that are not using --- in their markdown.
            label_tag = response.xpath('preceding-sibling::*[1][self::label]')
            if label_tag:
                label = stringify_children(label_tag[0])
                element_to_be_deleted = label_tag[0]

        # delete label or p element only if inputtype is fully accessible
        if inputfields[0].tag in ACCESSIBLE_CAPA_INPUT_TYPES and element_to_be_deleted is not None:
            element_to_be_deleted.getparent().remove(element_to_be_deleted)

    # Extract descriptions and set unique id on each description tag
    description_tags = response.findall('description')
    description_id = 1
    descriptions = OrderedDict()
    for description in description_tags:
        # BUGFIX: the original rebound `descriptions` to the stringified
        # text on every iteration, discarding the OrderedDict (and all but
        # the last description) and leaving `description_id` unused.
        # Store each description under a unique, ordered key instead.
        descriptions[
            'description_%s_%i' % (responsetype_id, description_id)
        ] = stringify_children(description)
        response.remove(description)
        description_id += 1

    problem_data.update({
        'title': label if label else '',
        'descriptions': descriptions
    })
def load_definition(cls, xml_object, system, location):
    '''Load a descriptor from the specified xml_object:

    If there is a filename attribute, load it as a string, and
    log a warning if it is not parseable by etree.HTMLParser.

    If there is not a filename attribute, the definition is the body
    of the xml_object, without the root tag (do not want <html> in the
    middle of a page)
    '''
    filename = xml_object.get('filename')
    if filename is None:
        # Inline definition: everything inside the root tag is the data.
        definition_xml = copy.deepcopy(xml_object)
        cls.clean_metadata_from_xml(definition_xml)
        return {'data': stringify_children(definition_xml)}, []
    else:
        # html is special.  cls.filename_extension is 'xml', but
        # if 'filename' is in the definition, that means to load
        # from .html
        # 'filename' in html pointers is a relative path
        # (not same as 'html/blah.html' when the pointer is in a directory itself)
        pointer_path = "{category}/{url_path}".format(
            category='html',
            url_path=name_to_pathname(location.name))
        base = path(pointer_path).dirname()
        # log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename))
        filepath = "{base}/{name}.html".format(base=base, name=filename)
        # log.debug("looking for html file for {0} at {1}".format(location, filepath))

        # VS[compat]
        # TODO (cpennington): If the file doesn't exist at the right path,
        # give the class a chance to fix it up. The file will be written out
        # again in the correct format.  This should go away once the CMS is
        # online and has imported all current (fall 2012) courses from xml
        if not system.resources_fs.exists(filepath):
            candidates = cls.backcompat_paths(filepath)
            # log.debug("candidates = {0}".format(candidates))
            for candidate in candidates:
                if system.resources_fs.exists(candidate):
                    filepath = candidate
                    break
        try:
            with system.resources_fs.open(filepath) as file:
                html = file.read().decode('utf-8')
                # Log a warning if we can't parse the file, but don't error
                if not check_html(html) and len(html) > 0:
                    msg = "Couldn't parse html in {0}, content = {1}".format(
                        filepath, html)
                    log.warning(msg)
                    system.error_tracker("Warning: " + msg)
                definition = {'data': html}

                # TODO (ichuang): remove this after migration
                # for Fall 2012 LMS migration: keep filename (and unmangled filename)
                definition['filename'] = [filepath, filename]

                return definition, []
        except (ResourceNotFoundError) as err:
            msg = 'Unable to load file contents at path {0}: {1} '.format(
                filepath, err)
            # add more info and re-raise
            # NOTE: Python 2 three-expression raise, preserving the original
            # traceback while replacing the exception.
            raise Exception(msg), None, sys.exc_info()[2]
def render(self):
    """Return the serialized contents of this block's first <render> element."""
    parsed = html.fromstring(self.data)
    render_node = parsed.xpath('render')[0]
    return stringify_children(render_node)
def parse(k):
    """Serialize the first child of ``xml_object`` matching ``k`` (assumes it exists)."""
    first_match = xml_object.xpath(k)[0]
    return stringify_children(first_match)
def __init__(
    self, system, location, definition, descriptor, instance_state=None,
    shared_state=None, metadata=None, static_data=None, **kwargs
):
    """
    Definition file should have one or many task blocks, a rubric block, and a prompt block.
    See DEFAULT_DATA in combined_open_ended_module for a sample.
    """
    # NOTE(review): instance_state defaults to None but is used with .get()
    # immediately below — presumably callers always pass a dict; confirm.
    self.instance_state = instance_state
    self.display_name = instance_state.get("display_name", "Open Ended")
    # We need to set the location here so the child modules can use it
    system.set("location", location)
    self.system = system
    # Tells the system which xml definition to load
    self.current_task_number = instance_state.get("current_task_number", 0)
    # This loads the states of the individual children
    self.task_states = instance_state.get("task_states", [])
    # This gets any old task states that have been persisted after the instructor changed the tasks.
    self.old_task_states = instance_state.get("old_task_states", [])
    # Overall state of the combined open ended module
    self.state = instance_state.get("state", self.INITIAL)
    self.student_attempts = instance_state.get("student_attempts", 0)
    self.weight = instance_state.get("weight", 1)
    # Allow reset is true if student has failed the criteria to move to the next child task
    self.ready_to_reset = instance_state.get("ready_to_reset", False)
    self.max_attempts = instance_state.get("max_attempts", MAX_ATTEMPTS)
    # These flags are stored as strings; TRUE_DICT membership converts them to booleans.
    self.is_scored = instance_state.get("graded", IS_SCORED) in TRUE_DICT
    self.accept_file_upload = instance_state.get("accept_file_upload", ACCEPT_FILE_UPLOAD) in TRUE_DICT
    self.skip_basic_checks = instance_state.get("skip_spelling_checks", SKIP_BASIC_CHECKS) in TRUE_DICT
    # Use the real peer grading service when the system provides an
    # interface, otherwise fall back to the mock (e.g. in tests/sandbox).
    if system.open_ended_grading_interface:
        self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
    else:
        self.peer_gs = MockPeerGradingService()
    # Peer-grading configuration knobs, with hard-coded defaults.
    self.required_peer_grading = instance_state.get("required_peer_grading", 3)
    self.peer_grader_count = instance_state.get("peer_grader_count", 3)
    self.min_to_calibrate = instance_state.get("min_to_calibrate", 3)
    self.max_to_calibrate = instance_state.get("max_to_calibrate", 6)
    due_date = instance_state.get("due", None)
    grace_period_string = instance_state.get("graceperiod", None)
    try:
        self.timeinfo = TimeInfo(due_date, grace_period_string)
    except Exception:
        # Log the failing location before re-raising so the bad course
        # content can be found.
        log.error("Error parsing due date information in location {0}".format(location))
        raise
    self.display_due_date = self.timeinfo.display_due_date
    self.rubric_renderer = CombinedOpenEndedRubric(system, True)
    rubric_string = stringify_children(definition["rubric"])
    # Validates the rubric up front; raises/fails if it cannot be parsed.
    self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED)
    # Static data is passed to the child modules to render
    self.static_data = {
        "max_score": self._max_score,
        "max_attempts": self.max_attempts,
        "prompt": definition["prompt"],
        "rubric": definition["rubric"],
        "display_name": self.display_name,
        "accept_file_upload": self.accept_file_upload,
        "close_date": self.timeinfo.close_date,
        "s3_interface": self.system.s3_interface,
        "skip_basic_checks": self.skip_basic_checks,
        "control": {
            "required_peer_grading": self.required_peer_grading,
            "peer_grader_count": self.peer_grader_count,
            "min_to_calibrate": self.min_to_calibrate,
            "max_to_calibrate": self.max_to_calibrate,
        },
    }
    self.task_xml = definition["task_xml"]
    self.location = location
    # Repair any inconsistent persisted state before picking the next task.
    self.fix_invalid_state()
    self.setup_next_task()
def __init__(self, system, location, definition, descriptor,
             instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):
    """
    Definition file should have one or many task blocks, a rubric block, and a prompt block.
    See DEFAULT_DATA in combined_open_ended_module for a sample.
    """
    # NOTE(review): instance_state defaults to None but is used with .get()
    # immediately below — presumably callers always pass a dict; confirm.
    self.instance_state = instance_state
    self.display_name = instance_state.get('display_name', "Open Ended")
    # We need to set the location here so the child modules can use it
    system.set('location', location)
    self.system = system
    # Tells the system which xml definition to load
    self.current_task_number = instance_state.get('current_task_number', 0)
    # This loads the states of the individual children
    self.task_states = instance_state.get('task_states', [])
    # Overall state of the combined open ended module
    self.state = instance_state.get('state', self.INITIAL)
    self.student_attempts = instance_state.get('student_attempts', 0)
    self.weight = instance_state.get('weight', 1)
    # Allow reset is true if student has failed the criteria to move to the next child task
    self.ready_to_reset = instance_state.get('ready_to_reset', False)
    self.max_attempts = instance_state.get('max_attempts', MAX_ATTEMPTS)
    # These flags are stored as strings; TRUE_DICT membership converts them to booleans.
    self.is_scored = instance_state.get('graded', IS_SCORED) in TRUE_DICT
    self.accept_file_upload = instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT
    self.skip_basic_checks = instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT
    # Use the real peer grading service when the system provides an
    # interface, otherwise fall back to the mock (e.g. in tests/sandbox).
    if system.open_ended_grading_interface:
        self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
    else:
        self.peer_gs = MockPeerGradingService()
    # Peer-grading configuration knobs, with hard-coded defaults.
    self.required_peer_grading = instance_state.get('required_peer_grading', 3)
    self.peer_grader_count = instance_state.get('peer_grader_count', 3)
    self.min_to_calibrate = instance_state.get('min_to_calibrate', 3)
    self.max_to_calibrate = instance_state.get('max_to_calibrate', 6)
    due_date = instance_state.get('due', None)
    grace_period_string = instance_state.get('graceperiod', None)
    try:
        self.timeinfo = TimeInfo(due_date, grace_period_string)
    except Exception:
        # Log the failing location before re-raising so the bad course
        # content can be found.
        log.error("Error parsing due date information in location {0}".format(location))
        raise
    self.display_due_date = self.timeinfo.display_due_date
    self.rubric_renderer = CombinedOpenEndedRubric(system, True)
    rubric_string = stringify_children(definition['rubric'])
    # Validates the rubric up front; raises/fails if it cannot be parsed.
    self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED)
    # Static data is passed to the child modules to render
    self.static_data = {
        'max_score': self._max_score,
        'max_attempts': self.max_attempts,
        'prompt': definition['prompt'],
        'rubric': definition['rubric'],
        'display_name': self.display_name,
        'accept_file_upload': self.accept_file_upload,
        'close_date': self.timeinfo.close_date,
        's3_interface': self.system.s3_interface,
        'skip_basic_checks': self.skip_basic_checks,
        'control': {
            'required_peer_grading': self.required_peer_grading,
            'peer_grader_count': self.peer_grader_count,
            'min_to_calibrate': self.min_to_calibrate,
            'max_to_calibrate': self.max_to_calibrate,
        }
    }
    self.task_xml = definition['task_xml']
    self.location = location
    self.setup_next_task()
def parse_task(k):
    """Return the serialized XML of every child of ``xml_object`` matching xpath ``k``."""
    # Evaluate the xpath query once and iterate the matches directly,
    # instead of the original xrange-over-len index loop that ran
    # xml_object.xpath(k) twice.
    return [stringify_children(node) for node in xml_object.xpath(k)]
def response_a11y_data(self, response, inputfields, responsetype_id, problem_data):
    """
    Construct data to be used for a11y.

    Mutates both the ``response`` XML tree (retagging/removing label and
    description elements) and the ``problem_data`` dict (filling in one
    entry per inputfield id).

    Arguments:
        response (object): xml response object
        inputfields (list): list of inputfields in a responsetype
        responsetype_id (str): responsetype id
        problem_data (dict): dict to be filled with response data
    """
    # if there are no inputtypes then don't do anything
    if not inputfields:
        return

    element_to_be_deleted = None
    label = ''

    if len(inputfields) > 1:
        # Multi-input responsetype: convert the group <label> and
        # <description> tags into <p> elements with ids that the
        # frontend can reference for aria labelling.
        response.set('multiple_inputtypes', 'true')
        group_label_tag = response.find('label')
        group_description_tags = response.findall('description')
        group_label_tag_id = u'multiinput-group-label-{}'.format(responsetype_id)
        group_label_tag_text = ''
        if group_label_tag is not None:
            group_label_tag.tag = 'p'
            group_label_tag.set('id', group_label_tag_id)
            group_label_tag.set('class', 'multi-inputs-group-label')
            group_label_tag_text = stringify_children(group_label_tag)
            response.set('multiinput-group-label-id', group_label_tag_id)

        group_description_ids = []
        for index, group_description_tag in enumerate(group_description_tags):
            group_description_tag_id = u'multiinput-group-description-{}-{}'.format(responsetype_id, index)
            group_description_tag.tag = 'p'
            group_description_tag.set('id', group_description_tag_id)
            group_description_tag.set('class', 'multi-inputs-group-description question-description')
            group_description_ids.append(group_description_tag_id)

        if group_description_ids:
            response.set('multiinput-group_description_ids', ' '.join(group_description_ids))

        # Every inputfield in the group shares the group label.
        for inputfield in inputfields:
            problem_data[inputfield.get('id')] = {
                'group_label': group_label_tag_text,
                'label': inputfield.attrib.get('label', ''),
                'descriptions': {}
            }
    else:
        # Extract label value from <label> tag or label attribute from inside the responsetype
        responsetype_label_tag = response.find('label')
        if responsetype_label_tag is not None:
            label = stringify_children(responsetype_label_tag)
            # store <label> tag containing question text to delete
            # it later otherwise question will be rendered twice
            element_to_be_deleted = responsetype_label_tag
        elif 'label' in inputfields[0].attrib:
            # in this case we have old problems with label attribute and p tag having question in it
            # we will pick the first sibling of responsetype if its a p tag and match the text with
            # the label attribute text. if they are equal then we will use this text as question.
            # Get first <p> tag before responsetype, this <p> may contains the question text.
            p_tag = response.xpath('preceding-sibling::*[1][self::p]')
            if p_tag and p_tag[0].text == inputfields[0].attrib['label']:
                label = stringify_children(p_tag[0])
                element_to_be_deleted = p_tag[0]
        else:
            # In this case the problems don't have tag or label attribute inside the responsetype
            # so we will get the first preceding label tag w.r.t to this responsetype.
            # This will take care of those multi-question problems that are not using --- in their markdown.
            label_tag = response.xpath('preceding-sibling::*[1][self::label]')
            if label_tag:
                label = stringify_children(label_tag[0])
                element_to_be_deleted = label_tag[0]

        # delete label or p element only if inputtype is fully accessible
        if inputfields[0].tag in ACCESSIBLE_CAPA_INPUT_TYPES and element_to_be_deleted is not None:
            element_to_be_deleted.getparent().remove(element_to_be_deleted)

        # Extract descriptions and set unique id on each description tag
        description_tags = response.findall('description')
        description_id = 1
        descriptions = OrderedDict()
        for description in description_tags:
            descriptions[
                "description_%s_%i" % (responsetype_id, description_id)
            ] = HTML(stringify_children(description))
            # Remove the tag from the tree; its content now lives only in
            # the descriptions dict.
            response.remove(description)
            description_id += 1

        problem_data[inputfields[0].get('id')] = {
            'label': HTML(label.strip()) if label else '',
            'descriptions': descriptions
        }
def __init__(self, system, location, definition, descriptor,
             instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):
    """
    Definition file should have one or many task blocks, a rubric block, and a prompt block:

    Sample file:
    <combinedopenended attempts="10000">
        <rubric>
            Blah blah rubric.
        </rubric>
        <prompt>
            Some prompt.
        </prompt>
        <task>
            <selfassessment>
                <hintprompt>
                    What hint about this problem would you give to someone?
                </hintprompt>
                <submitmessage>
                    Save Succcesful. Thanks for participating!
                </submitmessage>
            </selfassessment>
        </task>
        <task>
            <openended min_score_to_attempt="1" max_score_to_attempt="1">
                <openendedparam>
                    <initial_display>Enter essay here.</initial_display>
                    <answer_display>This is the answer.</answer_display>
                    <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
                </openendedparam>
            </openended>
        </task>
    </combinedopenended>
    """
    # NOTE(review): instance_state defaults to None but is used with .get()
    # immediately below — presumably callers always pass a dict; confirm.
    self.instance_state = instance_state
    self.display_name = instance_state.get('display_name', "Open Ended")
    self.rewrite_content_links = static_data.get('rewrite_content_links', "")
    # We need to set the location here so the child modules can use it
    system.set('location', location)
    self.system = system
    # Tells the system which xml definition to load
    self.current_task_number = instance_state.get('current_task_number', 0)
    # This loads the states of the individual children
    self.task_states = instance_state.get('task_states', [])
    # Overall state of the combined open ended module
    self.state = instance_state.get('state', self.INITIAL)
    self.student_attempts = instance_state.get('student_attempts', 0)
    self.weight = instance_state.get('weight', 1)
    # Allow reset is true if student has failed the criteria to move to the
    # next child task
    self.ready_to_reset = instance_state.get('ready_to_reset', False)
    self.attempts = self.instance_state.get('attempts', MAX_ATTEMPTS)
    # These flags are stored as strings; TRUE_DICT membership converts them to booleans.
    self.is_scored = self.instance_state.get('is_graded', IS_SCORED) in TRUE_DICT
    self.accept_file_upload = self.instance_state.get(
        'accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT
    self.skip_basic_checks = self.instance_state.get(
        'skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT
    due_date = self.instance_state.get('due', None)
    grace_period_string = self.instance_state.get('graceperiod', None)
    try:
        self.timeinfo = TimeInfo(due_date, grace_period_string)
    except Exception:
        # Log the failing location before re-raising so the bad course
        # content can be found.
        log.error(
            "Error parsing due date information in location {0}".format(
                location))
        raise
    self.display_due_date = self.timeinfo.display_due_date
    self.rubric_renderer = CombinedOpenEndedRubric(system, True)
    rubric_string = stringify_children(definition['rubric'])
    # Validates the rubric up front; raises/fails if it cannot be parsed.
    self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(
        rubric_string, location, MAX_SCORE_ALLOWED)
    # Static data is passed to the child modules to render
    self.static_data = {
        'max_score': self._max_score,
        'max_attempts': self.attempts,
        'prompt': definition['prompt'],
        'rubric': definition['rubric'],
        'display_name': self.display_name,
        'accept_file_upload': self.accept_file_upload,
        'close_date': self.timeinfo.close_date,
        's3_interface': self.system.s3_interface,
        'skip_basic_checks': self.skip_basic_checks,
    }
    self.task_xml = definition['task_xml']
    self.location = location
    self.setup_next_task()
def parse_task(k):
    """Return the serialized XML of every child of ``xml_object`` matching xpath ``k``."""
    # Evaluate the xpath query once and iterate the matches directly,
    # instead of the original xrange-over-len index loop that ran
    # xml_object.xpath(k) twice.
    return [stringify_children(node) for node in xml_object.xpath(k)]
def configuration(self):
    """Return the serialized contents of this block's first <configuration> element."""
    parsed = html.fromstring(self.data)
    config_node = parsed.xpath('configuration')[0]
    return stringify_children(config_node)
def response_a11y_data(self, response, inputfields, responsetype_id, problem_data):
    """
    Construct data to be used for a11y.

    Mutates both the ``response`` XML tree (retagging/removing label and
    description elements) and the ``problem_data`` dict (filling in one
    entry per inputfield id).

    Arguments:
        response (object): xml response object
        inputfields (list): list of inputfields in a responsetype
        responsetype_id (str): responsetype id
        problem_data (dict): dict to be filled with response data
    """
    # if there are no inputtypes then don't do anything
    if not inputfields:
        return

    element_to_be_deleted = None
    label = ""

    if len(inputfields) > 1:
        # Multi-input responsetype: convert the group <label> into a <p>
        # element the frontend can reference for aria labelling.
        response.set("multiple_inputtypes", "true")
        group_label_tag = response.find("label")
        group_label_tag_text = ""
        if group_label_tag is not None:
            group_label_tag.tag = "p"
            group_label_tag.set("id", responsetype_id)
            group_label_tag.set("class", "multi-inputs-group-label")
            group_label_tag_text = stringify_children(group_label_tag)

        # Every inputfield in the group shares the group label.
        for inputfield in inputfields:
            problem_data[inputfield.get("id")] = {
                "group_label": group_label_tag_text,
                "label": inputfield.attrib.get("label", ""),
                "descriptions": {},
            }
    else:
        # Extract label value from <label> tag or label attribute from inside the responsetype
        responsetype_label_tag = response.find("label")
        if responsetype_label_tag is not None:
            label = stringify_children(responsetype_label_tag)
            # store <label> tag containing question text to delete
            # it later otherwise question will be rendered twice
            element_to_be_deleted = responsetype_label_tag
        elif "label" in inputfields[0].attrib:
            # in this case we have old problems with label attribute and p tag having question in it
            # we will pick the first sibling of responsetype if its a p tag and match the text with
            # the label attribute text. if they are equal then we will use this text as question.
            # Get first <p> tag before responsetype, this <p> may contains the question text.
            p_tag = response.xpath("preceding-sibling::*[1][self::p]")
            if p_tag and p_tag[0].text == inputfields[0].attrib["label"]:
                label = stringify_children(p_tag[0])
                element_to_be_deleted = p_tag[0]
        else:
            # In this case the problems don't have tag or label attribute inside the responsetype
            # so we will get the first preceding label tag w.r.t to this responsetype.
            # This will take care of those multi-question problems that are not using --- in their markdown.
            label_tag = response.xpath("preceding-sibling::*[1][self::label]")
            if label_tag:
                label = stringify_children(label_tag[0])
                element_to_be_deleted = label_tag[0]

        # delete label or p element only if inputtype is fully accessible
        if inputfields[0].tag in ACCESSIBLE_CAPA_INPUT_TYPES and element_to_be_deleted is not None:
            element_to_be_deleted.getparent().remove(element_to_be_deleted)

        # Extract descriptions and set unique id on each description tag
        description_tags = response.findall("description")
        description_id = 1
        descriptions = OrderedDict()
        for description in description_tags:
            descriptions["description_%s_%i" % (responsetype_id, description_id)] = HTML(
                stringify_children(description)
            )
            # Remove the tag from the tree; its content now lives only in
            # the descriptions dict.
            response.remove(description)
            description_id += 1

        problem_data[inputfields[0].get("id")] = {
            "label": HTML(label.strip()) if label else "",
            "descriptions": descriptions,
        }
def __init__(self, system, location, definition, descriptor,
             instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):
    """
    Definition file should have one or many task blocks, a rubric block, and a prompt block:

    Sample file:
    <combinedopenended attempts="10000">
        <rubric>
            Blah blah rubric.
        </rubric>
        <prompt>
            Some prompt.
        </prompt>
        <task>
            <selfassessment>
                <hintprompt>
                    What hint about this problem would you give to someone?
                </hintprompt>
                <submitmessage>
                    Save Succcesful. Thanks for participating!
                </submitmessage>
            </selfassessment>
        </task>
        <task>
            <openended min_score_to_attempt="1" max_score_to_attempt="1">
                <openendedparam>
                    <initial_display>Enter essay here.</initial_display>
                    <answer_display>This is the answer.</answer_display>
                    <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
                </openendedparam>
            </openended>
        </task>
    </combinedopenended>
    """
    # NOTE(review): instance_state defaults to None but is used with .get()
    # immediately below — presumably callers always pass a dict; confirm.
    self.instance_state = instance_state
    self.display_name = instance_state.get('display_name', "Open Ended")
    self.rewrite_content_links = static_data.get('rewrite_content_links', "")
    # We need to set the location here so the child modules can use it
    system.set('location', location)
    self.system = system
    # Tells the system which xml definition to load
    self.current_task_number = instance_state.get('current_task_number', 0)
    # This loads the states of the individual children
    self.task_states = instance_state.get('task_states', [])
    # Overall state of the combined open ended module
    self.state = instance_state.get('state', self.INITIAL)
    self.student_attempts = instance_state.get('student_attempts', 0)
    self.weight = instance_state.get('weight', 1)
    # Allow reset is true if student has failed the criteria to move to the next child task
    self.ready_to_reset = instance_state.get('ready_to_reset', False)
    self.attempts = self.instance_state.get('attempts', MAX_ATTEMPTS)
    # These flags are stored as strings; TRUE_DICT membership converts them to booleans.
    self.is_scored = self.instance_state.get('is_graded', IS_SCORED) in TRUE_DICT
    self.accept_file_upload = self.instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT
    self.skip_basic_checks = self.instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT
    due_date = self.instance_state.get('due', None)
    grace_period_string = self.instance_state.get('graceperiod', None)
    try:
        self.timeinfo = TimeInfo(due_date, grace_period_string)
    except Exception:
        # Log the failing location before re-raising so the bad course
        # content can be found.
        log.error("Error parsing due date information in location {0}".format(location))
        raise
    self.display_due_date = self.timeinfo.display_due_date
    self.rubric_renderer = CombinedOpenEndedRubric(system, True)
    rubric_string = stringify_children(definition['rubric'])
    # Validates the rubric up front; raises/fails if it cannot be parsed.
    self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED)
    # Static data is passed to the child modules to render
    self.static_data = {
        'max_score': self._max_score,
        'max_attempts': self.attempts,
        'prompt': definition['prompt'],
        'rubric': definition['rubric'],
        'display_name': self.display_name,
        'accept_file_upload': self.accept_file_upload,
        'close_date': self.timeinfo.close_date,
        's3_interface': self.system.s3_interface,
        'skip_basic_checks': self.skip_basic_checks,
    }
    self.task_xml = definition['task_xml']
    self.location = location
    self.setup_next_task()
def __init__(self, system, location, definition, descriptor,
             instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):
    """
    Definition file should have one or many task blocks, a rubric block, and a prompt block.
    See DEFAULT_DATA in combined_open_ended_module for a sample.
    """
    # NOTE(review): instance_state defaults to None but is used with .get()
    # immediately below — presumably callers always pass a dict; confirm.
    self.instance_state = instance_state
    self.display_name = instance_state.get('display_name', "Open Ended")
    # We need to set the location here so the child modules can use it
    system.set('location', location)
    self.system = system
    # Tells the system which xml definition to load
    self.current_task_number = instance_state.get('current_task_number', 0)
    # This loads the states of the individual children
    self.task_states = instance_state.get('task_states', [])
    # This gets any old task states that have been persisted after the instructor changed the tasks.
    self.old_task_states = instance_state.get('old_task_states', [])
    # Overall state of the combined open ended module
    self.state = instance_state.get('state', self.INITIAL)
    self.student_attempts = instance_state.get('student_attempts', 0)
    self.weight = instance_state.get('weight', 1)
    # Allow reset is true if student has failed the criteria to move to the next child task
    self.ready_to_reset = instance_state.get('ready_to_reset', False)
    self.max_attempts = instance_state.get('max_attempts', MAX_ATTEMPTS)
    # These flags are stored as strings; TRUE_DICT membership converts them to booleans.
    self.is_scored = instance_state.get('graded', IS_SCORED) in TRUE_DICT
    self.accept_file_upload = instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT
    self.skip_basic_checks = instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT
    # Use the real peer grading service when the system provides an
    # interface, otherwise fall back to the mock (e.g. in tests/sandbox).
    if system.open_ended_grading_interface:
        self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
    else:
        self.peer_gs = MockPeerGradingService()
    # Peer-grading configuration knobs, with hard-coded defaults.
    self.required_peer_grading = instance_state.get('required_peer_grading', 3)
    self.peer_grader_count = instance_state.get('peer_grader_count', 3)
    self.min_to_calibrate = instance_state.get('min_to_calibrate', 3)
    self.max_to_calibrate = instance_state.get('max_to_calibrate', 6)
    self.peer_grade_finished_submissions_when_none_pending = instance_state.get(
        'peer_grade_finished_submissions_when_none_pending', False
    )
    due_date = instance_state.get('due', None)
    grace_period_string = instance_state.get('graceperiod', None)
    try:
        self.timeinfo = TimeInfo(due_date, grace_period_string)
    except Exception:
        # Log the failing location before re-raising so the bad course
        # content can be found.
        log.error("Error parsing due date information in location {0}".format(location))
        raise
    self.display_due_date = self.timeinfo.display_due_date
    self.rubric_renderer = CombinedOpenEndedRubric(system, True)
    rubric_string = stringify_children(definition['rubric'])
    # Validates the rubric up front; raises/fails if it cannot be parsed.
    self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED)
    # Static data is passed to the child modules to render
    self.static_data = {
        'max_score': self._max_score,
        'max_attempts': self.max_attempts,
        'prompt': definition['prompt'],
        'rubric': definition['rubric'],
        'display_name': self.display_name,
        'accept_file_upload': self.accept_file_upload,
        'close_date': self.timeinfo.close_date,
        's3_interface': self.system.s3_interface,
        'skip_basic_checks': self.skip_basic_checks,
        'control': {
            'required_peer_grading': self.required_peer_grading,
            'peer_grader_count': self.peer_grader_count,
            'min_to_calibrate': self.min_to_calibrate,
            'max_to_calibrate': self.max_to_calibrate,
            'peer_grade_finished_submissions_when_none_pending': (
                self.peer_grade_finished_submissions_when_none_pending
            ),
        }
    }
    self.task_xml = definition['task_xml']
    self.location = location
    # Repair any inconsistent persisted state before picking the next task.
    self.fix_invalid_state()
    self.setup_next_task()