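"""
Combined open ended (ORA v1) grading module: wraps self assessment and external
grading child tasks and moves a student between them.
"""
import json
import logging

from lxml import etree

# The edx-platform import paths below are assumptions based on the usual layout
# of the xmodule package from this era; verify them against the surrounding
# codebase before relying on them.
from xmodule.progress import Progress
from xmodule.stringify import stringify_children
from xmodule.timeinfo import TimeInfo
from xmodule.capa_module import ComplexEncoder
from xmodule.open_ended_grading_classes import open_ended_module, self_assessment_module
from xmodule.open_ended_grading_classes.peer_grading_service import (
    PeerGradingService, MockPeerGradingService, GradingServiceError
)
from xmodule.combined_open_ended_rubric import (
    CombinedOpenEndedRubric, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST
)

log = logging.getLogger(__name__)

# Module-level defaults referenced by the class below.  The values here are
# placeholders for illustration; the real module defines its own defaults.
MAX_ATTEMPTS = 1
MAX_SCORE_ALLOWED = 50
IS_SCORED = False
ACCEPT_FILE_UPLOAD = False
SKIP_BASIC_CHECKS = False
TRUE_DICT = [True, "True", "true", "TRUE"]
HUMAN_TASK_TYPE = {
    'selfassessment': "Self Assessment",
    'openended': "External Grader",
}
HUMAN_STATES = {
    'initial': "Not started.",
    'assessing': "Being scored.",
    'intermediate_done': "Scoring finished.",
    'done': "Complete.",
}
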
class CombinedOpenEndedV1Module():
    """
    This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
    It transitions between problems, and supports arbitrary ordering.
    Each combined open ended module contains one or multiple "child" modules.
    Child modules track their own state, and can transition between states.  They also implement get_html and
    handle_ajax.
    The combined open ended module transitions between child modules as appropriate, tracks its own state, and passes
    ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem).

    ajax actions implemented by all children are:
        'save_answer' -- Saves the student answer
        'save_assessment' -- Saves the student assessment (or external grader assessment)
        'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc)

    ajax actions implemented by combined open ended module are:
        'reset' -- resets the whole combined open ended module and returns to the first child module
        'next_problem' -- moves to the next child module

    Types of children.  Task is synonymous with child module, so each combined open ended module
    incorporates multiple children (tasks):
        openendedmodule
        selfassessmentmodule
    """
    STATE_VERSION = 1

    # states
    INITIAL = 'initial'
    ASSESSING = 'assessing'
    INTERMEDIATE_DONE = 'intermediate_done'
    DONE = 'done'

    # Where the templates live for this problem
    TEMPLATE_DIR = "combinedopenended"

    def __init__(self, system, location, definition, descriptor, instance_state=None, shared_state=None,
                 metadata=None, static_data=None, **kwargs):
        """
        Definition file should have one or many task blocks, a rubric block, and a prompt block.
        See DEFAULT_DATA in combined_open_ended_module for a sample.
        """
        self.instance_state = instance_state
        self.display_name = instance_state.get('display_name', "Open Ended")

        # We need to set the location here so the child modules can use it
        system.set('location', location)
        self.system = system

        # Tells the system which xml definition to load
        self.current_task_number = instance_state.get('current_task_number', 0)
        # This loads the states of the individual children
        self.task_states = instance_state.get('task_states', [])
        # Overall state of the combined open ended module
        self.state = instance_state.get('state', self.INITIAL)

        self.student_attempts = instance_state.get('student_attempts', 0)
        self.weight = instance_state.get('weight', 1)

        # Allow reset is true if student has failed the criteria to move to the next child task
        self.ready_to_reset = instance_state.get('ready_to_reset', False)
        self.max_attempts = instance_state.get('max_attempts', MAX_ATTEMPTS)
        self.is_scored = instance_state.get('graded', IS_SCORED) in TRUE_DICT
        self.accept_file_upload = instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT
        self.skip_basic_checks = instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT

        if system.open_ended_grading_interface:
            self.peer_gs = PeerGradingService(system.open_ended_grading_interface, system)
        else:
            self.peer_gs = MockPeerGradingService()

        self.required_peer_grading = instance_state.get('required_peer_grading', 3)
        self.peer_grader_count = instance_state.get('peer_grader_count', 3)
        self.min_to_calibrate = instance_state.get('min_to_calibrate', 3)
        self.max_to_calibrate = instance_state.get('max_to_calibrate', 6)

        due_date = instance_state.get('due', None)
        grace_period_string = instance_state.get('graceperiod', None)
        try:
            self.timeinfo = TimeInfo(due_date, grace_period_string)
        except Exception:
            log.error("Error parsing due date information in location {0}".format(location))
            raise
        self.display_due_date = self.timeinfo.display_due_date

        self.rubric_renderer = CombinedOpenEndedRubric(system, True)
        rubric_string = stringify_children(definition['rubric'])
        self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location,
                                                                            MAX_SCORE_ALLOWED)

        # Static data is passed to the child modules to render
        self.static_data = {
            'max_score': self._max_score,
            'max_attempts': self.max_attempts,
            'prompt': definition['prompt'],
            'rubric': definition['rubric'],
            'display_name': self.display_name,
            'accept_file_upload': self.accept_file_upload,
            'close_date': self.timeinfo.close_date,
            's3_interface': self.system.s3_interface,
            'skip_basic_checks': self.skip_basic_checks,
            'control': {
                'required_peer_grading': self.required_peer_grading,
                'peer_grader_count': self.peer_grader_count,
                'min_to_calibrate': self.min_to_calibrate,
                'max_to_calibrate': self.max_to_calibrate,
            }
        }

        self.task_xml = definition['task_xml']
        self.location = location
        self.setup_next_task()

    def get_tag_name(self, xml):
        """
        Gets the tag name of a given xml block.
        Input: XML string
        Output: The name of the root tag
        """
        tag = etree.fromstring(xml).tag
        return tag

    def overwrite_state(self, current_task_state):
        """
        Overwrites an instance state and sets the latest response to the current response.  This is used
        to ensure that the student response is carried over from the first child to the rest.
        Input: Task state json string
        Output: Task state json string
        """
        last_response_data = self.get_last_response(self.current_task_number - 1)
        last_response = last_response_data['response']
        loaded_task_state = json.loads(current_task_state)
        if loaded_task_state['child_state'] == self.INITIAL:
            loaded_task_state['child_state'] = self.ASSESSING
            loaded_task_state['child_created'] = True
            loaded_task_state['child_history'].append({'answer': last_response})
            current_task_state = json.dumps(loaded_task_state)
        return current_task_state

    def child_modules(self):
        """
        Returns the constructors associated with the child modules in a dictionary.  This makes writing functions
        simpler (saves code duplication).
        Input: None
        Output: A dictionary of dictionaries containing the descriptor functions and module functions
        """
        child_modules = {
            'openended': open_ended_module.OpenEndedModule,
            'selfassessment': self_assessment_module.SelfAssessmentModule,
        }
        child_descriptors = {
            'openended': open_ended_module.OpenEndedDescriptor,
            'selfassessment': self_assessment_module.SelfAssessmentDescriptor,
        }
        children = {
            'modules': child_modules,
            'descriptors': child_descriptors,
        }
        return children

    def setup_next_task(self, reset=False):
        """
        Sets up the next task for the module.  Creates an instance state if none exists, carries over the answer
        from the last instance state to the next if needed.
        Input: A boolean indicating whether or not this is being called by the reset function.
        Output: Boolean True (not useful right now)
        """
        current_task_state = None
        if len(self.task_states) > self.current_task_number:
            current_task_state = self.task_states[self.current_task_number]

        self.current_task_xml = self.task_xml[self.current_task_number]

        if self.current_task_number > 0:
            self.ready_to_reset = self.check_allow_reset()
            if self.ready_to_reset:
                self.current_task_number = self.current_task_number - 1

        current_task_type = self.get_tag_name(self.current_task_xml)

        children = self.child_modules()
        child_task_module = children['modules'][current_task_type]

        self.current_task_descriptor = children['descriptors'][current_task_type](self.system)

        # This is the xml object created from the xml definition of the current task
        etree_xml = etree.fromstring(self.current_task_xml)

        # This sends the etree_xml object through the descriptor module of the current task, and
        # returns the xml parsed by the descriptor
        self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)

        if current_task_state is None and self.current_task_number == 0:
            self.current_task = child_task_module(self.system, self.location, self.current_task_parsed_xml,
                                                  self.current_task_descriptor, self.static_data)
            self.task_states.append(self.current_task.get_instance_state())
            self.state = self.ASSESSING
        elif current_task_state is None and self.current_task_number > 0:
            last_response_data = self.get_last_response(self.current_task_number - 1)
            last_response = last_response_data['response']
            current_task_state = json.dumps({
                'child_state': self.ASSESSING,
                'version': self.STATE_VERSION,
                'max_score': self._max_score,
                'child_attempts': 0,
                'child_created': True,
                'child_history': [{'answer': last_response}],
            })
            self.current_task = child_task_module(self.system, self.location, self.current_task_parsed_xml,
                                                  self.current_task_descriptor, self.static_data,
                                                  instance_state=current_task_state)
            self.task_states.append(self.current_task.get_instance_state())
            self.state = self.ASSESSING
        else:
            if self.current_task_number > 0 and not reset:
                current_task_state = self.overwrite_state(current_task_state)
            self.current_task = child_task_module(self.system, self.location, self.current_task_parsed_xml,
                                                  self.current_task_descriptor, self.static_data,
                                                  instance_state=current_task_state)

        return True

    def check_allow_reset(self):
        """
        Checks to see if the student has passed the criteria to move to the next module.  If not, sets
        allow_reset to true and halts the student progress through the tasks.
        Input: None
        Output: the allow_reset attribute of the current module.
        """
        if not self.ready_to_reset:
            if self.current_task_number > 0:
                last_response_data = self.get_last_response(self.current_task_number - 1)
                current_response_data = self.get_current_attributes(self.current_task_number)

                if (current_response_data['min_score_to_attempt'] > last_response_data['score'] or
                        current_response_data['max_score_to_attempt'] < last_response_data['score']):
                    self.state = self.DONE
                    self.ready_to_reset = True

        return self.ready_to_reset

    def get_context(self):
        """
        Generates a context dictionary that is used to render html.
        Input: None
        Output: A dictionary that can be rendered into the combined open ended template.
""" task_html = self.get_html_base() # set context variables and render template context = { 'items': [{'content': task_html}], 'ajax_url': self.system.ajax_url, 'allow_reset': self.ready_to_reset, 'state': self.state, 'task_count': len(self.task_xml), 'task_number': self.current_task_number + 1, 'status': self.get_status(False), 'display_name': self.display_name, 'accept_file_upload': self.accept_file_upload, 'location': self.location, 'legend_list': LEGEND_LIST, 'human_state': HUMAN_STATES.get(self.state,"Not started.") } return context def get_html(self): """ Gets HTML for rendering. Input: None Output: rendered html """ context = self.get_context() html = self.system.render_template('{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context) return html def get_html_nonsystem(self): """ Gets HTML for rendering via AJAX. Does not use system, because system contains some additional html, which is not appropriate for returning via ajax calls. Input: None Output: HTML rendered directly via Mako """ context = self.get_context() html = self.system.render_template('{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context) return html def get_html_base(self): """ Gets the HTML associated with the current child task Input: None Output: Child task HTML """ self.update_task_states() return self.current_task.get_html(self.system) def get_html_ajax(self, data): """ Get HTML in AJAX callback data - Needed to preserve AJAX structure Output: Dictionary with html attribute """ return {'html': self.get_html()} def get_current_attributes(self, task_number): """ Gets the min and max score to attempt attributes of the specified task. Input: The number of the task. Output: The minimum and maximum scores needed to move on to the specified task. """ task_xml = self.task_xml[task_number] etree_xml = etree.fromstring(task_xml) min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0)) max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score)) return {'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt} def get_last_response(self, task_number): """ Returns data associated with the specified task number, such as the last response, score, etc. Input: The number of the task. Output: A dictionary that contains information about the specified task. 
""" last_response = "" task_state = self.task_states[task_number] task_xml = self.task_xml[task_number] task_type = self.get_tag_name(task_xml) children = self.child_modules() task_descriptor = children['descriptors'][task_type](self.system) etree_xml = etree.fromstring(task_xml) min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0)) max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score)) task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system) task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor, self.static_data, instance_state=task_state) last_response = task.latest_answer() last_score = task.latest_score() all_scores = task.all_scores() last_post_assessment = task.latest_post_assessment(self.system) last_post_feedback = "" feedback_dicts = [{}] grader_ids = [0] submission_ids = [0] if task_type == "openended": last_post_assessment = task.latest_post_assessment(self.system, short_feedback=False, join_feedback=False) if isinstance(last_post_assessment, list): eval_list = [] for i in xrange(0, len(last_post_assessment)): eval_list.append(task.format_feedback_with_evaluation(self.system, last_post_assessment[i])) last_post_evaluation = "".join(eval_list) else: last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment) last_post_assessment = last_post_evaluation try: rubric_data = task._parse_score_msg(task.child_history[-1].get('post_assessment', ""), self.system) except Exception: log.debug("Could not parse rubric data from child history. " "Likely we have not yet initialized a previous step, so this is perfectly fine.") rubric_data = {} rubric_scores = rubric_data.get('rubric_scores') grader_types = rubric_data.get('grader_types') feedback_items = rubric_data.get('feedback_items') feedback_dicts = rubric_data.get('feedback_dicts') grader_ids = rubric_data.get('grader_ids') submission_ids = rubric_data.get('submission_ids') elif task_type == "selfassessment": rubric_scores = last_post_assessment grader_types = ['SA'] feedback_items = [''] last_post_assessment = "" last_correctness = task.is_last_response_correct() max_score = task.max_score() state = task.child_state if task_type in HUMAN_TASK_TYPE: human_task_name = HUMAN_TASK_TYPE[task_type] else: human_task_name = task_type if state in task.HUMAN_NAMES: human_state = task.HUMAN_NAMES[state] else: human_state = state if grader_types is not None and len(grader_types) > 0: grader_type = grader_types[0] else: grader_type = "IN" grader_types = ["IN"] if grader_type in HUMAN_GRADER_TYPE: human_grader_name = HUMAN_GRADER_TYPE[grader_type] else: human_grader_name = grader_type last_response_dict = { 'response': last_response, 'score': last_score, 'all_scores': all_scores, 'post_assessment': last_post_assessment, 'type': task_type, 'max_score': max_score, 'state': state, 'human_state': human_state, 'human_task': human_task_name, 'correct': last_correctness, 'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt, 'rubric_scores': rubric_scores, 'grader_types': grader_types, 'feedback_items': feedback_items, 'grader_type': grader_type, 'human_grader_type': human_grader_name, 'feedback_dicts': feedback_dicts, 'grader_ids': grader_ids, 'submission_ids': submission_ids, 'success' : True } return last_response_dict def extract_human_name_from_task(self, task_xml): tree = etree.fromstring(task_xml) payload = 
tree.xpath("/openended/openendedparam/grader_payload") if len(payload)==0: task_name = "selfassessment" else: inner_payload = json.loads(payload[0].text) task_name = inner_payload['grader_settings'] human_task = HUMAN_TASK_TYPE[task_name] return human_task def update_task_states(self): """ Updates the task state of the combined open ended module with the task state of the current child module. Input: None Output: boolean indicating whether or not the task state changed. """ changed = False if not self.ready_to_reset: self.task_states[self.current_task_number] = self.current_task.get_instance_state() current_task_state = json.loads(self.task_states[self.current_task_number]) if current_task_state['child_state'] == self.DONE: self.current_task_number += 1 if self.current_task_number >= (len(self.task_xml)): self.state = self.DONE self.current_task_number = len(self.task_xml) - 1 else: self.state = self.INITIAL changed = True self.setup_next_task() return changed def update_task_states_ajax(self, return_html): """ Runs the update task states function for ajax calls. Currently the same as update_task_states Input: The html returned by the handle_ajax function of the child Output: New html that should be rendered """ changed = self.update_task_states() if changed: pass return return_html def check_if_student_has_done_needed_grading(self): """ Checks with the ORA server to see if the student has completed the needed peer grading to be shown their grade. For example, if a student submits one response, and three peers grade their response, the student cannot see their grades and feedback unless they reciprocate. Output: success - boolean indicator of success allowed_to_submit - boolean indicator of whether student has done their needed grading or not error_message - If not success, explains why """ student_id = self.system.anonymous_student_id success = False allowed_to_submit = True try: response = self.peer_gs.get_data_for_location(self.location.url(), student_id) count_graded = response['count_graded'] count_required = response['count_required'] student_sub_count = response['student_sub_count'] count_available = response['count_available'] success = True except GradingServiceError: # This is a dev_facing_error log.error("Could not contact external open ended graders for location {0} and student {1}".format( self.location, student_id)) # This is a student_facing_error error_message = "Could not contact the graders. Please notify course staff." return success, allowed_to_submit, error_message except KeyError: log.error("Invalid response from grading server for location {0} and student {1}".format(self.location, student_id)) error_message = "Received invalid response from the graders. Please notify course staff." return success, allowed_to_submit, error_message if count_graded >= count_required or count_available==0: error_message = "" return success, allowed_to_submit, error_message else: allowed_to_submit = False # This is a student_facing_error error_string = ("<h4>Feedback not available yet</h4>" "<p>You need to peer grade {0} more submissions in order to see your feedback.</p>" "<p>You have graded responses from {1} students, and {2} students have graded your submissions. </p>" "<p>You have made {3} submissions.</p>") error_message = error_string.format(count_required - count_graded, count_graded, count_required, student_sub_count) return success, allowed_to_submit, error_message def get_rubric(self, _data): """ Gets the results of a given grader via ajax. 
        Input: AJAX data dictionary
        Output: Dictionary to be rendered via ajax that contains the result html.
        """
        all_responses = []
        success, can_see_rubric, error = self.check_if_student_has_done_needed_grading()
        if not can_see_rubric:
            return {
                'html': self.system.render_template(
                    '{0}/combined_open_ended_hidden_results.html'.format(self.TEMPLATE_DIR),
                    {'error': error}),
                'success': True,
                'hide_reset': True,
            }

        contexts = []
        rubric_number = self.current_task_number
        if self.ready_to_reset:
            rubric_number += 1
        response = self.get_last_response(rubric_number)
        score_length = len(response['grader_types'])
        for z in xrange(score_length):
            if response['grader_types'][z] in HUMAN_GRADER_TYPE:
                try:
                    feedback = response['feedback_dicts'][z].get('feedback', '')
                except TypeError:
                    return {'success': False}
                rubric_scores = [[response['rubric_scores'][z]]]
                grader_types = [[response['grader_types'][z]]]
                feedback_items = [[response['feedback_items'][z]]]
                rubric_html = self.rubric_renderer.render_combined_rubric(
                    stringify_children(self.static_data['rubric']),
                    rubric_scores, grader_types, feedback_items)
                contexts.append({
                    'result': rubric_html,
                    'task_name': 'Scored rubric',
                    'feedback': feedback,
                })

        context = {
            'results': contexts,
        }
        html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)
        return {'html': html, 'success': True, 'hide_reset': False}

    def get_legend(self, _data):
        """
        Gets the legend that explains the grader types via ajax.
        Input: AJAX data dictionary
        Output: Dictionary to be rendered via ajax that contains the legend html.
        """
        context = {
            'legend_list': LEGEND_LIST,
        }
        html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context)
        return {'html': html, 'success': True}

    def handle_ajax(self, dispatch, data):
        """
        This is called by courseware.module_render, to handle an AJAX call.
        "data" is request.POST.

        Returns a json dictionary:
        { 'progress_changed' : True/False,
          'progress': 'none'/'in_progress'/'done',
          <other request-specific values here > }
        """
        handlers = {
            'next_problem': self.next_problem,
            'reset': self.reset,
            'get_combined_rubric': self.get_rubric,
            'get_legend': self.get_legend,
            'get_last_response': self.get_last_response_ajax,
            'get_current_state': self.get_current_state,
            'get_html': self.get_html_ajax,
        }

        if dispatch not in handlers:
            return_html = self.current_task.handle_ajax(dispatch, data, self.system)
            return self.update_task_states_ajax(return_html)

        d = handlers[dispatch](data)
        return json.dumps(d, cls=ComplexEncoder)

    def get_current_state(self, data):
        """
        Gets the current context dictionary via ajax callback.
        data - Needed to preserve ajax callback structure
        Output: Context dictionary
        """
        return self.get_context()

    def get_last_response_ajax(self, data):
        """
        Get the last response via ajax callback
        data - Needed to preserve ajax callback structure
        Output: Last response dictionary
        """
        return self.get_last_response(self.current_task_number)

    def next_problem(self, _data):
        """
        Called via ajax to advance to the next problem.
        Input: AJAX data request.
        Output: Dictionary to be rendered
        """
        self.update_task_states()
        return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.ready_to_reset}

    def reset(self, data):
        """
        If resetting is allowed, reset the state of the combined open ended module.
        Input: AJAX data dictionary
        Output: AJAX dictionary to be rendered
        """
        if self.state != self.DONE:
            if not self.ready_to_reset:
                return self.out_of_sync_error(data)

        success, can_reset, error = self.check_if_student_has_done_needed_grading()
        if not can_reset:
            return {'error': error, 'success': False}

        if self.student_attempts >= self.max_attempts - 1:
            if self.student_attempts == self.max_attempts - 1:
                self.student_attempts += 1
            return {
                'success': False,
                # This is a student_facing_error
                'error': (
                    'You have attempted this question {0} times.  '
                    'You are only allowed to attempt it {1} times.'
                ).format(self.student_attempts, self.max_attempts)
            }

        self.student_attempts += 1
        self.state = self.INITIAL
        self.ready_to_reset = False
        for i in xrange(len(self.task_xml)):
            self.current_task_number = i
            self.setup_next_task(reset=True)
            self.current_task.reset(self.system)
            self.task_states[self.current_task_number] = self.current_task.get_instance_state()
        self.current_task_number = 0
        self.ready_to_reset = False
        self.setup_next_task()
        return {'success': True, 'html': self.get_html_nonsystem()}

    def get_instance_state(self):
        """
        Returns the current instance state.  The module can be recreated from the instance state.
        Input: None
        Output: A dictionary containing the instance state.
        """
        state = {
            'version': self.STATE_VERSION,
            'current_task_number': self.current_task_number,
            'state': self.state,
            'task_states': self.task_states,
            'student_attempts': self.student_attempts,
            'ready_to_reset': self.ready_to_reset,
        }
        return json.dumps(state)

    def get_status(self, render_via_ajax):
        """
        Gets the status panel to be displayed at the top right.
        Input: render_via_ajax - boolean indicating whether the status is being rendered in response to an ajax call
        Output: The status html to be rendered
        """
        status = []
        for i in xrange(0, len(self.task_xml)):
            human_task_name = self.extract_human_name_from_task(self.task_xml[i])
            task_data = {'task_number': i + 1, 'human_task': human_task_name,
                         'current': self.current_task_number == i}
            status.append(task_data)

        context = {
            'status_list': status,
            'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,
            'legend_list': LEGEND_LIST,
            'render_via_ajax': render_via_ajax,
        }
        status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR),
                                                  context)
        return status_html

    def check_if_done_and_scored(self):
        """
        Checks if the object is currently in a finished state (either student didn't meet criteria to move
        to next step, in which case they are in the allow_reset state, or they are done with the question
        entirely, in which case they will be in the self.DONE state), and if it is scored or not.
        @return: Boolean corresponding to the above.
        """
        return (self.state == self.DONE or self.ready_to_reset) and self.is_scored

    def get_score(self):
        """
        Score the student received on the problem, or None if there is no score.

        Returns:
          dictionary
             {'score': integer, from 0 to get_max_score(),
              'total': get_max_score()}
        """
        max_score = None
        score = None

        # The old default was None, so set to 1 if it is the old default weight
        weight = self.weight
        if weight is None:
            weight = 1

        if self.is_scored:
            # Finds the maximum score of all student attempts and keeps it.
            score_mat = []
            for i in xrange(0, len(self.task_states)):
                # For each task, extract all student scores on that task (each attempt for each task)
                last_response = self.get_last_response(i)
                max_score = last_response.get('max_score', None)
                score = last_response.get('all_scores', None)
                if score is not None:
                    # Convert none scores and weight scores properly
                    for z in xrange(0, len(score)):
                        if score[z] is None:
                            score[z] = 0
                        score[z] *= float(weight)
                    score_mat.append(score)

            if len(score_mat) > 0:
                # Currently, assume that the final step is the correct one, and that those are the final scores.
                # This will change in the future, which is why the machinery above exists to extract all scores
                # on all steps.
                # TODO: better final score handling.
                scores = score_mat[-1]
                score = max(scores)
            else:
                score = 0

            if max_score is not None:
                # Weight the max score if it is not None
                max_score *= float(weight)
            else:
                # Without a max_score, we cannot have a score!
                score = None

        score_dict = {
            'score': score,
            'total': max_score,
        }
        return score_dict

    def max_score(self):
        """
        Maximum score.  Two notes:
            * This is generic; in abstract, a problem could be 3/5 points on one
              randomization, and 5/7 on another
        """
        max_score = None
        if self.check_if_done_and_scored():
            last_response = self.get_last_response(self.current_task_number)
            max_score = last_response['max_score']
        return max_score

    def get_progress(self):
        """
        Return a progress.Progress object that represents how far the
        student has gone in this module.  Must be implemented to get correct
        progress tracking behavior in nesting modules like sequence and
        vertical.

        If this module has no notion of progress, return None.
        """
        progress_object = Progress(self.current_task_number, len(self.task_xml))
        return progress_object

    def out_of_sync_error(self, data, msg=''):
        """
        Return dict out-of-sync error message, and also log.
        """
        # This is a dev_facing_error
        log.warning("Combined module state out of sync. state: %r, data: %r. %s", self.state, data, msg)
        # This is a student_facing_error
        return {'success': False, 'error': 'The problem state got out-of-sync. Please try reloading the page.'}
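

# Illustrative sketch only: shows the shape of the serialized instance state that
# get_instance_state() produces and that __init__ expects to receive once the
# runtime has deserialized it.  The values below are invented for the example and
# are not defaults used by the module.
if __name__ == "__main__":
    example_instance_state = {
        'version': CombinedOpenEndedV1Module.STATE_VERSION,
        'current_task_number': 1,
        'state': CombinedOpenEndedV1Module.ASSESSING,
        # One serialized child state (itself a json string) per task.
        'task_states': [json.dumps({'child_state': 'done', 'child_history': []})],
        'student_attempts': 0,
        'ready_to_reset': False,
        # Extra keys read by __init__; 'graded' is checked against TRUE_DICT.
        'display_name': "Open Ended",
        'graded': "True",
        'max_attempts': 1,
    }
    serialized = json.dumps(example_instance_state)
    print(json.loads(serialized)['state'])  # -> "assessing"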