def update_editor_context(self, data, suffix=''):  # pylint: disable=unused-argument
    """
    Update the XBlock's configuration.

    Args:
        data (dict): Data from the request; should have the format described
            in the editor schema (``EDITOR_UPDATE_SCHEMA``).

    Keyword Arguments:
        suffix (str): Not used

    Returns:
        dict with keys 'success' (bool) and 'msg' (str)
    """
    # Validate and sanitize the data using a schema
    # If the data is invalid, this means something is wrong with
    # our JavaScript, so we log an exception.
    try:
        data = EDITOR_UPDATE_SCHEMA(data)
    except MultipleInvalid:
        logger.exception('Editor context is invalid')
        return {
            'success': False,
            'msg': self._('Error updating XBlock configuration')
        }

    # Check that the editor assessment order contains all the assessments.
    current_order = set(data['editor_assessments_order'])
    if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != current_order:
        # Backwards compatibility: "staff-assessment" may not be present.
        # If that is the only problem with this data, just add it manually and continue.
        if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) == current_order | {'staff-assessment'}:
            data['editor_assessments_order'].append('staff-assessment')
            logger.info(
                'Backwards compatibility: editor_assessments_order now contains staff-assessment'
            )
        else:
            # BUGFIX: this branch is not inside an exception handler, so
            # logger.exception() (which attaches exc_info) is incorrect here
            # and would log a spurious "NoneType: None" traceback.
            # Use logger.error() for a plain error record instead.
            logger.error(
                'editor_assessments_order does not contain all expected assessment types'
            )
            return {
                'success': False,
                'msg': self._('Error updating XBlock configuration')
            }

    # At least one of the two response types must remain enabled.
    if not data['text_response'] and not data['file_upload_response']:
        return {
            'success': False,
            'msg': self._("Error: both text and file upload responses can't be disabled")
        }
    # If one response type is disabled, the other cannot be merely optional.
    if not data['text_response'] and data['file_upload_response'] == 'optional':
        return {
            'success': False,
            'msg': self._("Error: in case if text response is disabled file upload response must be required")
        }
    if not data['file_upload_response'] and data['text_response'] == 'optional':
        return {
            'success': False,
            'msg': self._("Error: in case if file upload response is disabled text response must be required")
        }

    # Backwards compatibility: We used to treat "name" as both a user-facing label
    # and a unique identifier for criteria and options.
    # Now we treat "name" as a unique identifier, and we've added an additional "label"
    # field that we display to the user.
    # If the JavaScript editor sends us a criterion or option without a "name"
    # field, we should assign it a unique identifier.
    for criterion in data['criteria']:
        if 'name' not in criterion:
            criterion['name'] = uuid4().hex
        for option in criterion['options']:
            if 'name' not in option:
                option['name'] = uuid4().hex

    # Run the full XBlock validator over the rubric, assessments, and dates.
    xblock_validator = validator(self, self._)
    success, msg = xblock_validator(
        create_rubric_dict(data['prompts'], data['criteria']),
        data['assessments'],
        submission_start=data['submission_start'],
        submission_due=data['submission_due'],
        leaderboard_show=data['leaderboard_show']
    )
    if not success:
        return {
            'success': False,
            'msg': self._('Validation error: {error}').format(error=msg)
        }

    # At this point, all the input data has been validated,
    # so we can safely modify the XBlock fields.
    self.title = data['title']
    self.display_name = data['title']
    self.prompts = data['prompts']
    self.prompts_type = data['prompts_type']
    self.rubric_criteria = data['criteria']
    self.rubric_assessments = data['assessments']
    self.editor_assessments_order = data['editor_assessments_order']
    self.rubric_feedback_prompt = data['feedback_prompt']
    self.rubric_feedback_default_text = data['feedback_default_text']
    self.submission_start = data['submission_start']
    self.submission_due = data['submission_due']
    self.text_response = data['text_response']
    self.file_upload_response = data['file_upload_response']
    if data['file_upload_response']:
        self.file_upload_type = data['file_upload_type']
        self.white_listed_file_types_string = data['white_listed_file_types']
    else:
        # File upload disabled: clear the upload-related settings.
        self.file_upload_type = None
        self.white_listed_file_types_string = None
    self.allow_latex = bool(data['allow_latex'])
    self.leaderboard_show = data['leaderboard_show']

    return {
        'success': True,
        'msg': self._(u'Successfully updated OpenAssessment XBlock')
    }
def update_editor_context(self, data, suffix=''):  # pylint: disable=unused-argument
    """
    Update the XBlock's configuration.

    Args:
        data (dict): Data from the request; should have the format described
            in the editor schema (``EDITOR_UPDATE_SCHEMA``).

    Keyword Arguments:
        suffix (str): Not used

    Returns:
        dict with keys 'success' (bool) and 'msg' (str)
    """
    # Validate and sanitize the data using a schema
    # If the data is invalid, this means something is wrong with
    # our JavaScript, so we log an exception.
    try:
        data = EDITOR_UPDATE_SCHEMA(data)
    except MultipleInvalid:
        logger.exception('Editor context is invalid')
        return {
            'success': False,
            'msg': self._('Error updating XBlock configuration')
        }

    # Check that the editor assessment order contains all the assessments.
    # We are more flexible on example-based.
    if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != (
            set(data['editor_assessments_order']) - {'example-based-assessment'}):
        # BUGFIX: this branch is not inside an exception handler, so
        # logger.exception() (which attaches exc_info) is incorrect here
        # and would log a spurious "NoneType: None" traceback.
        # Use logger.error() for a plain error record instead.
        logger.error(
            'editor_assessments_order does not contain all expected assessment types'
        )
        return {
            'success': False,
            'msg': self._('Error updating XBlock configuration')
        }

    # Backwards compatibility: We used to treat "name" as both a user-facing label
    # and a unique identifier for criteria and options.
    # Now we treat "name" as a unique identifier, and we've added an additional "label"
    # field that we display to the user.
    # If the JavaScript editor sends us a criterion or option without a "name"
    # field, we should assign it a unique identifier.
    for criterion in data['criteria']:
        if 'name' not in criterion:
            criterion['name'] = uuid4().hex
        for option in criterion['options']:
            if 'name' not in option:
                option['name'] = uuid4().hex

    # If example based assessment is enabled, we replace it's xml definition with the dictionary
    # definition we expect for validation and storing.
    for assessment in data['assessments']:
        if assessment['name'] == 'example-based-assessment':
            try:
                assessment['examples'] = parse_examples_from_xml_str(assessment['examples_xml'])
            except UpdateFromXmlError:
                return {
                    'success': False,
                    'msg': self._(
                        u'Validation error: There was an error in the XML definition of the '
                        u'examples provided by the user. Please correct the XML definition before saving.'
                    )
                }
            except KeyError:
                # 'examples_xml' was missing from the assessment dict entirely.
                return {
                    'success': False,
                    'msg': self._(u'Validation error: No examples were provided for example based assessment.')
                }
            # This is where we default to EASE for problems which are edited in the GUI
            assessment['algorithm_id'] = 'ease'
        if assessment['name'] == 'student-training':
            # Convert each plain-text example answer into the multi-part
            # answer format ({'parts': [{'text': ...}, ...]}).
            for example in assessment['examples']:
                example['answer'] = {
                    'parts': [{'text': text} for text in example['answer']]
                }

    # Run the full XBlock validator over the rubric, assessments, and dates.
    xblock_validator = validator(self, self._)
    success, msg = xblock_validator(
        create_rubric_dict(data['prompts'], data['criteria']),
        data['assessments'],
        submission_start=data['submission_start'],
        submission_due=data['submission_due'],
        leaderboard_show=data['leaderboard_show']
    )
    if not success:
        return {
            'success': False,
            'msg': self._('Validation error: {error}').format(error=msg)
        }

    # At this point, all the input data has been validated,
    # so we can safely modify the XBlock fields.
    self.title = data['title']
    self.display_name = data['title']
    self.prompts = data['prompts']
    self.rubric_criteria = data['criteria']
    self.rubric_assessments = data['assessments']
    self.editor_assessments_order = data['editor_assessments_order']
    self.rubric_feedback_prompt = data['feedback_prompt']
    self.rubric_feedback_default_text = data['feedback_default_text']
    self.submission_start = data['submission_start']
    self.submission_due = data['submission_due']
    self.allow_file_upload = bool(data['allow_file_upload'])
    self.allow_latex = bool(data['allow_latex'])
    self.leaderboard_show = data['leaderboard_show']

    return {
        'success': True,
        'msg': self._(u'Successfully updated OpenAssessment XBlock')
    }