def test_message_catalog_translations(self):
    """
    Test: Message catalog from FakeTranslation should return required translations.
    """
    _translator = FakeTranslations.translator(
        {
            'es': {'Hello': 'es-hello-world'},
            'fr': {'Hello': 'fr-hello-world'},
        },
    )
    localedir = '/translations'

    translation.activate("es")
    with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
                                                                    languages=[get_language()])):
        i18n_service = self.get_module_i18n_service(self.descriptor)
        self.assertEqual(i18n_service.ugettext('Hello'), 'es-hello-world')

    translation.activate("ar")
    with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
                                                                    languages=[get_language()])):
        i18n_service = self.get_module_i18n_service(self.descriptor)
        self.assertEqual(get_gettext(i18n_service)('Hello'), 'Hello')
        self.assertNotEqual(get_gettext(i18n_service)('Hello'), 'fr-hello-world')
        self.assertNotEqual(get_gettext(i18n_service)('Hello'), 'es-hello-world')

    translation.activate("fr")
    with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
                                                                    languages=[get_language()])):
        i18n_service = self.get_module_i18n_service(self.descriptor)
        self.assertEqual(i18n_service.ugettext('Hello'), 'fr-hello-world')
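
# A minimal, self-contained sketch of how a FakeTranslations-style catalog can be
# built on the stdlib, assuming only gettext.NullTranslations; the _FakeCatalog
# name below is hypothetical and not the real FakeTranslations API:
import gettext

class _FakeCatalog(gettext.NullTranslations):
    def __init__(self, catalog):
        super().__init__()
        self._catalog = catalog

    def gettext(self, message):
        # Fall back to the untranslated message, as NullTranslations does.
        return self._catalog.get(message, message)

es = _FakeCatalog({'Hello': 'es-hello-world'})
assert es.gettext('Hello') == 'es-hello-world'
assert es.gettext('Goodbye') == 'Goodbye'  # unknown keys pass through unchanged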
def get_grade_from_current_answers(self, student_answers):
    """
    Gets the grade for the currently-saved problem state, but does not save it to the block.

    For new student_answers being graded, `student_answers` is a dict of all the entries from
    request.POST, but with the first part of each key removed (the string before the first "_").
    Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123.

    For rescoring, `student_answers` is None.

    Calls the Response for each question in this problem, to do the actual grading.
    """
    # old CorrectMap
    oldcmap = self.correct_map

    # start new with empty CorrectMap
    newcmap = CorrectMap()

    # Call each responsetype instance to do actual grading
    for responder in self.responders.values():
        # File objects are passed only if responsetype explicitly allows
        # for file submissions. But we have no way of knowing if
        # student_answers contains a proper answer or the filename of
        # an earlier submission, so for now skip these entirely.
        # TODO: figure out where to get file submissions when rescoring.
        if 'filesubmission' in responder.allowed_inputfields and student_answers is None:
            _ = get_gettext(self.capa_system.i18n)
            raise Exception(_(u"Cannot rescore problems with possible file submissions"))

        # use 'student_answers' only if it is provided, and if it might contain a file
        # submission that would not exist in the persisted "student_answers".
        if 'filesubmission' in responder.allowed_inputfields and student_answers is not None:
            results = responder.evaluate_answers(student_answers, oldcmap)
        else:
            results = responder.evaluate_answers(self.student_answers, oldcmap)
        newcmap.update(results)

    return newcmap
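
# A minimal sketch of the key-stripping described in the docstring above: the part
# of each request.POST key before the first "_" is dropped. The raw_post dict is
# illustrative only:
raw_post = {'input_ID123': '42', 'input_fromjs_ID123': 'cached'}
student_answers = {key.split('_', 1)[1]: value for key, value in raw_post.items()}
assert student_answers == {'ID123': '42', 'fromjs_ID123': 'cached'}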
def __init__(self, module):
    self.module = module
    self.old_ugettext = get_gettext(module)
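
# A minimal sketch of the save-and-restore pattern this __init__ sets up, assuming
# the surrounding class temporarily swaps the module's translator and later puts
# the original back; GettextSwapper and its context-manager protocol are
# hypothetical, not the original class:
import types

class GettextSwapper:
    def __init__(self, module, replacement):
        self.module = module
        self.old_ugettext = module.ugettext
        self.replacement = replacement

    def __enter__(self):
        self.module.ugettext = self.replacement
        return self.module

    def __exit__(self, exc_type, exc_value, traceback):
        # Always restore the original translator, even if the body raised.
        self.module.ugettext = self.old_ugettext

mod = types.SimpleNamespace(ugettext=lambda text: text)
with GettextSwapper(mod, lambda text: 'x-' + text):
    assert mod.ugettext('Hello') == 'x-Hello'
assert mod.ugettext('Hello') == 'Hello'  # restored on exit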
def do_targeted_feedback(self, tree):
    """
    Implements targeted-feedback in-place on <multiplechoiceresponse> --
    choice-level explanations shown to a student after submission.
    Does nothing if there is no targeted-feedback attribute.
    """
    _ = get_gettext(self.capa_system.i18n)
    # Note that the modifications have been done, avoiding problems if called twice.
    if hasattr(self, 'has_targeted'):
        return
    self.has_targeted = True  # pylint: disable=attribute-defined-outside-init

    for mult_choice_response in tree.xpath('//multiplechoiceresponse[@targeted-feedback]'):
        show_explanation = mult_choice_response.get('targeted-feedback') == 'alwaysShowCorrectChoiceExplanation'

        # Grab the first choicegroup (there should only be one within each <multiplechoiceresponse> tag)
        choicegroup = mult_choice_response.xpath('./choicegroup[@type="MultipleChoice"]')[0]
        choices_list = list(choicegroup.iter('choice'))

        # Find the student answer key that matches our <choicegroup> id
        student_answer = self.student_answers.get(choicegroup.get('id'))
        expl_id_for_student_answer = None

        # Keep track of the explanation-id that corresponds to the student's answer
        # Also, keep track of the solution-id
        solution_id = None
        choice_correctness_for_student_answer = _('Incorrect')
        for choice in choices_list:
            if choice.get('name') == student_answer:
                expl_id_for_student_answer = choice.get('explanation-id')
                if choice.get('correct') == 'true':
                    choice_correctness_for_student_answer = _('Correct')
            if choice.get('correct') == 'true':
                solution_id = choice.get('explanation-id')

        # Filter out targetedfeedback that doesn't correspond to the answer the student selected
        # Note: following-sibling will grab all following siblings, so we just want the first in the list
        targetedfeedbackset = mult_choice_response.xpath('./following-sibling::targetedfeedbackset')
        if len(targetedfeedbackset) != 0:
            targetedfeedbackset = targetedfeedbackset[0]
            targetedfeedbacks = targetedfeedbackset.xpath('./targetedfeedback')
            # find the legend by id in choicegroup.html for aria-describedby
            problem_legend_id = str(choicegroup.get('id')) + '-legend'
            for targetedfeedback in targetedfeedbacks:
                screenreadertext = etree.Element("span")
                targetedfeedback.insert(0, screenreadertext)
                screenreadertext.set('class', 'sr')
                screenreadertext.text = choice_correctness_for_student_answer
                targetedfeedback.set('role', 'group')
                targetedfeedback.set('aria-describedby', problem_legend_id)
                # Don't show targeted feedback if the student hasn't answered the problem
                # or if the target feedback doesn't match the student's (incorrect) answer
                if not self.done or targetedfeedback.get('explanation-id') != expl_id_for_student_answer:
                    targetedfeedbackset.remove(targetedfeedback)

        # Do not displace the solution under these circumstances
        if not show_explanation or not self.done:
            continue

        # The next element should either be <solution> or <solutionset>
        next_element = targetedfeedbackset.getnext()
        parent_element = tree
        solution_element = None
        if next_element is not None and next_element.tag == 'solution':
            solution_element = next_element
        elif next_element is not None and next_element.tag == 'solutionset':
            solutions = next_element.xpath('./solution')
            for solution in solutions:
                if solution.get('explanation-id') == solution_id:
                    parent_element = next_element
                    solution_element = solution

        # If we could not find the solution element, then skip the remaining steps below
        if solution_element is None:
            continue

        # Change our correct-choice explanation from a "solution explanation" to within
        # the set of targeted feedback, which means the explanation will render on the page
        # without the student clicking "Show Answer" or seeing a checkmark next to the correct choice
        parent_element.remove(solution_element)

        # Add our solution instead to the targetedfeedbackset and change its tag name
        solution_element.tag = 'targetedfeedback'
        targetedfeedbackset.append(solution_element)
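
# A minimal, self-contained sketch of the lxml pattern used above: filter sibling
# elements by an attribute, then re-parent and rename the matching solution. The
# XML and the chosen explanation-id ("b") are illustrative only:
from lxml import etree

tree = etree.fromstring(
    '<problem>'
    '<targetedfeedbackset>'
    '<targetedfeedback explanation-id="a"/>'
    '<targetedfeedback explanation-id="b"/>'
    '</targetedfeedbackset>'
    '<solution explanation-id="b"/>'
    '</problem>'
)
feedbackset = tree.xpath('./targetedfeedbackset')[0]
for feedback in feedbackset.xpath('./targetedfeedback'):
    if feedback.get('explanation-id') != 'b':  # keep only the student's match
        feedbackset.remove(feedback)
solution = feedbackset.getnext()  # the sibling <solution>
tree.remove(solution)
solution.tag = 'targetedfeedback'  # rename so it renders as targeted feedback
feedbackset.append(solution)
assert [fb.get('explanation-id') for fb in feedbackset] == ['b', 'b']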
def find_question_label(self, answer_id):
    """
    Obtain the most relevant question text for a particular answer.

    E.g. in a problem like "How much is 2+2?" "Two"/"Three"/"More than three",
    this function returns the "How much is 2+2?" text.

    It uses, in order:
    - the question prompt, if the question has one
    - the <p> or <label> element which precedes the choices (skipping descriptive elements)
    - a text like "Question 5" if no other name could be found

    Arguments:
        answer_id: a string like "98e6a8e915904d5389821a94e48babcf_13_1"

    Returns:
        a string with the question text
    """
    _ = get_gettext(self.capa_system.i18n)
    # Some questions define a prompt with this format: >>This is a prompt<<
    prompt = self.problem_data[answer_id].get('label')

    if prompt:
        question_text = prompt.striptags()
    else:
        # If no prompt, then we must look for something resembling a question ourselves
        #
        # We have a structure like:
        #
        # <p />
        # <optionresponse id="a0effb954cca4759994f1ac9e9434bf4_2">
        #   <optioninput id="a0effb954cca4759994f1ac9e9434bf4_3_1" />
        # </optionresponse>
        #
        # Starting from answer (the optioninput in this example) we go up and backwards
        xml_elems = self.tree.xpath('//*[@id="' + answer_id + '"]')
        assert len(xml_elems) == 1
        xml_elem = xml_elems[0].getparent()

        # Get the element that probably contains the question text
        questiontext_elem = xml_elem.getprevious()

        # Go backwards looking for a <p> or <label>, but skip <description> because it doesn't
        # contain the question text.
        #
        # E.g. if we have this:
        # <p /> <description /> <optionresponse /> <optionresponse />
        #
        # then from the first optionresponse we'll end with the <p>.
        # If we start in the second optionresponse, we'll find another response in the way,
        # stop early, and instead of a question we'll report "Question 2".
        SKIP_ELEMS = ['description']
        LABEL_ELEMS = ['p', 'label']
        while questiontext_elem is not None and questiontext_elem.tag in SKIP_ELEMS:
            questiontext_elem = questiontext_elem.getprevious()

        if questiontext_elem is not None and questiontext_elem.tag in LABEL_ELEMS:
            question_text = questiontext_elem.text
        else:
            # For instance 'd2e35c1d294b4ba0b3b1048615605d2a_2_1' contains 2,
            # which is used in question number 1 (see example XML in comment above)
            # There's no question 0 (question IDs start at 1, answer IDs at 2)
            question_nr = int(answer_id.split('_')[-2]) - 1
            question_text = _("Question {0}").format(question_nr)

    return question_text
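
# A minimal sketch of the two fallbacks above: the backwards sibling walk that
# skips <description>, and the "Question N" numbering derived from the answer_id
# (toy XML; the answer_id is the docstring's own example):
from lxml import etree

tree = etree.fromstring(
    '<problem>'
    '<p>How much is 2+2?</p>'
    '<description>Pick one.</description>'
    '<optionresponse/>'
    '</problem>'
)
elem = tree.xpath('./optionresponse')[0].getprevious()
while elem is not None and elem.tag in ('description',):
    elem = elem.getprevious()
assert elem.text == 'How much is 2+2?'

answer_id = '98e6a8e915904d5389821a94e48babcf_13_1'
assert int(answer_id.split('_')[-2]) - 1 == 12  # question IDs start at 1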