Esempio n. 1
0
    def update_xml(self, data, suffix=''):
        """
        Update the XBlock's XML.

        Args:
            data (dict): Data from the request; should have a value for the key 'xml'
                containing the XML for this XBlock.

        Kwargs:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """
        # Guard clause: the request must carry the XML payload.
        if 'xml' not in data:
            return {'success': False, 'msg': _('Must specify "xml" in request JSON dict.')}

        try:
            update_from_xml_str(self, data['xml'], validator=validator(self))
        except ValidationError as ex:
            return {'success': False, 'msg': _('Validation error: {error}').format(error=ex)}
        except UpdateFromXmlError as ex:
            return {'success': False, 'msg': _('An error occurred while saving: {error}').format(error=ex)}

        return {'success': True, 'msg': _('Successfully updated OpenAssessment XBlock')}
Esempio n. 2
0
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        """
        config = parse_from_xml(node)
        block = runtime.construct_xblock_from_class(cls, keys)

        # Validate the parsed configuration before copying it onto the block.
        validate = validator(block, block._, strict_post_release=False)
        validate(
            create_rubric_dict(config['prompt'], config['rubric_criteria']),
            config['rubric_assessments'],
            submission_start=config['submission_start'],
            submission_due=config['submission_due'],
            leaderboard_show=config['leaderboard_show'],
        )

        # Each of these block fields shares its name with the config key.
        for field_name in (
                'rubric_criteria',
                'rubric_feedback_prompt',
                'rubric_feedback_default_text',
                'rubric_assessments',
                'submission_start',
                'submission_due',
                'title',
                'prompt',
                'allow_file_upload',
                'allow_latex',
                'leaderboard_show',
        ):
            setattr(block, field_name, config[field_name])

        return block
Esempio n. 3
0
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        """
        config = parse_from_xml(node)
        block = runtime.construct_xblock_from_class(cls, keys)

        # Validate the parsed settings before writing them to the block.
        run_validation = validator(block, block._, strict_post_release=False)
        rubric = create_rubric_dict(config["prompt"], config["rubric_criteria"])
        run_validation(
            rubric,
            config["rubric_assessments"],
            submission_start=config["submission_start"],
            submission_due=config["submission_due"],
            leaderboard_show=config["leaderboard_show"],
        )

        # Transfer every validated setting onto the block's fields.
        for name in ("rubric_criteria", "rubric_feedback_prompt",
                     "rubric_assessments", "submission_start",
                     "submission_due", "title", "prompt",
                     "allow_file_upload", "leaderboard_show"):
            setattr(block, name, config[name])

        return block
Esempio n. 4
0
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        """
        config = parse_from_xml(node)
        block = runtime.construct_xblock_from_class(cls, keys)

        # Run the configuration through the validator before persisting it.
        validate = validator(block, block._, strict_post_release=False)
        validate(
            create_rubric_dict(config['prompts'], config['rubric_criteria']),
            config['rubric_assessments'],
            submission_start=config['submission_start'],
            submission_due=config['submission_due'],
            leaderboard_show=config['leaderboard_show'],
        )

        # Config keys whose block field carries the same name.
        for field_name in (
                'rubric_criteria',
                'rubric_feedback_prompt',
                'rubric_feedback_default_text',
                'rubric_assessments',
                'submission_start',
                'submission_due',
                'title',
                'prompts',
                'allow_file_upload',
                'file_upload_type',
                'allow_latex',
                'leaderboard_show',
        ):
            setattr(block, field_name, config[field_name])
        # Stored under a different attribute name on the block.
        block.white_listed_file_types_string = config['white_listed_file_types']

        return block
Esempio n. 5
0
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        """
        block = runtime.construct_xblock_from_class(cls, keys)
        # Populate the new block from the XML node, validating before save.
        xml_validator = validator(block, strict_post_release=False)
        return update_from_xml(block, node, validator=xml_validator)
Esempio n. 6
0
 def setUp(self):
     """
     Build a mocked OA XBlock and a validator function bound to it.
     """
     mock_block = mock.MagicMock(OpenAssessmentBlock)
     mock_block.is_released.return_value = False
     mock_block.rubric_assessments.return_value = []
     mock_block.prompt = ""
     mock_block.rubric_criteria = []
     mock_block.start = None
     mock_block.due = None
     self.oa_block = mock_block
     self.validator = validator(self.oa_block)
Esempio n. 7
0
 def setUp(self):
     """
     Create a mocked, unreleased OA XBlock plus a validator for it.
     """
     block = mock.MagicMock(OpenAssessmentBlock)
     block.is_released.return_value = False
     block.rubric_assessments.return_value = []
     block.rubric_criteria = []
     block.prompt = ""
     block.start = None
     block.due = None
     self.oa_block = block
     self.validator = validator(self.oa_block)
Esempio n. 8
0
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        Args:
            node: XML node describing this block.
            runtime: The XBlock runtime.
            keys: Scope ids for the new block.
            id_generator: Unused here; part of the XBlock parse_xml contract.

        Returns:
            The constructed block, populated from the XML definition.
        """
        # (Removed an `unknown_handler` inner function that was defined but
        # never referenced anywhere in this method.)
        block = runtime.construct_xblock_from_class(cls, keys)

        return update_from_xml(block, node, validator=validator(block, strict_post_release=False))
Esempio n. 9
0
    def update_xml(self, data, suffix=''):
        """
        Update the XBlock's XML.

        Args:
            data (dict): Data from the request; should have a value for the key 'xml'
                containing the XML for this XBlock.

        Kwargs:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """
        if 'xml' in data:
            try:
                update_from_xml_str(self,
                                    data['xml'],
                                    validator=validator(self))

            except ValidationError as ex:
                # BaseException.message does not exist on Python 3; format the
                # exception itself (str() is applied by .format()).
                return {
                    'success': False,
                    'msg': _('Validation error: {error}').format(error=ex)
                }

            except UpdateFromXmlError as ex:
                return {
                    'success': False,
                    'msg': _('An error occurred while saving: {error}').format(error=ex)
                }

            else:
                return {
                    'success': True,
                    'msg': _('Successfully updated OpenAssessment XBlock')
                }

        else:
            return {
                'success': False,
                'msg': _('Must specify "xml" in request JSON dict.')
            }
Esempio n. 10
0
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        """
        config = parse_from_xml(node)
        block = runtime.construct_xblock_from_class(cls, keys)

        # Validate the parsed configuration before applying it to the block.
        validate = validator(block, block._, strict_post_release=False)
        validate(
            create_rubric_dict(config['prompts'], config['rubric_criteria']),
            config['rubric_assessments'],
            submission_start=config['submission_start'],
            submission_due=config['submission_due'],
            leaderboard_show=config['leaderboard_show'],
        )

        # Fields whose attribute name matches the config key exactly.
        for field_name in (
                'rubric_criteria',
                'rubric_feedback_prompt',
                'rubric_feedback_default_text',
                'rubric_assessments',
                'submission_start',
                'submission_due',
                'title',
                'prompts',
                'prompts_type',
                'text_response',
                'text_response_editor',
                'file_upload_response',
                'allow_file_upload',
                'file_upload_type',
                'allow_multiple_files',
                'allow_latex',
                'leaderboard_show',
                'group_access',
                'teams_enabled',
                'selected_teamset_id',
                'show_rubric_during_response',
        ):
            setattr(block, field_name, config[field_name])
        # Stored under a different attribute name on the block.
        block.white_listed_file_types_string = config['white_listed_file_types']
        return block
Esempio n. 11
0
    def update_editor_context(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Update the XBlock's configuration.

        Args:
            data (dict): Data from the request; should have the format described
            in the editor schema.

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """
        # Validate and sanitize the data using a schema.
        # Invalid data here means something is wrong with our JavaScript,
        # so we log an exception.
        try:
            data = EDITOR_UPDATE_SCHEMA(data)
        except MultipleInvalid:
            logger.exception('Editor context is invalid')
            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Check that the editor assessment order contains all the assessments.
        current_order = set(data['editor_assessments_order'])
        if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != current_order:
            # Backwards compatibility: "staff-assessment" may not be present.
            # If that is the only problem with this data, just add it manually and continue.
            if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) == current_order | {'staff-assessment'}:
                data['editor_assessments_order'].append('staff-assessment')
                logger.info('Backwards compatibility: editor_assessments_order now contains staff-assessment')
            else:
                logger.exception('editor_assessments_order does not contain all expected assessment types')
                return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Reject combinations where no response could ever be submitted.
        if not data['text_response'] and not data['file_upload_response']:
            return {
                'success': False,
                'msg': self._("Error: both text and file upload responses can't be disabled"),
            }
        if not data['text_response'] and data['file_upload_response'] == 'optional':
            return {
                'success': False,
                'msg': self._("Error: in case if text response is disabled file upload response must be required"),
            }
        if not data['file_upload_response'] and data['text_response'] == 'optional':
            return {
                'success': False,
                'msg': self._("Error: in case if file upload response is disabled text response must be required"),
            }

        # Backwards compatibility: We used to treat "name" as both a user-facing label
        # and a unique identifier for criteria and options.
        # Now we treat "name" as a unique identifier, and we've added an additional "label"
        # field that we display to the user.
        # If the JavaScript editor sends us a criterion or option without a "name"
        # field, we should assign it a unique identifier.
        for criterion in data['criteria']:
            if 'name' not in criterion:
                criterion['name'] = uuid4().hex
            for option in criterion['options']:
                if 'name' not in option:
                    option['name'] = uuid4().hex

        xblock_validator = validator(self, self._)
        success, msg = xblock_validator(
            create_rubric_dict(data['prompts'], data['criteria']),
            data['assessments'],
            submission_start=data['submission_start'],
            submission_due=data['submission_due'],
            leaderboard_show=data['leaderboard_show'],
        )
        if not success:
            return {'success': False, 'msg': self._('Validation error: {error}').format(error=msg)}

        # At this point, all the input data has been validated,
        # so we can safely modify the XBlock fields.
        self.title = data['title']
        self.display_name = data['title']
        self.prompts = data['prompts']
        self.prompts_type = data['prompts_type']
        self.rubric_criteria = data['criteria']
        self.rubric_assessments = data['assessments']
        self.editor_assessments_order = data['editor_assessments_order']
        self.rubric_feedback_prompt = data['feedback_prompt']
        self.rubric_feedback_default_text = data['feedback_default_text']
        self.submission_start = data['submission_start']
        self.submission_due = data['submission_due']
        self.text_response = data['text_response']
        self.file_upload_response = data['file_upload_response']
        # File-upload settings only apply when file responses are enabled.
        if data['file_upload_response']:
            self.file_upload_type = data['file_upload_type']
            self.white_listed_file_types_string = data['white_listed_file_types']
        else:
            self.file_upload_type = None
            self.white_listed_file_types_string = None
        self.allow_latex = bool(data['allow_latex'])
        self.leaderboard_show = data['leaderboard_show']

        return {'success': True, 'msg': self._(u'Successfully updated OpenAssessment XBlock')}
Esempio n. 12
0
    def update_editor_context(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Update the XBlock's configuration.

        Args:
            data (dict): Data from the request; should have the format described
            in the editor schema.

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """

        # Validate and sanitize the data using a schema
        # If the data is invalid, this means something is wrong with
        # our JavaScript, so we log an exception.
        try:
            data = EDITOR_UPDATE_SCHEMA(data)
        except MultipleInvalid:
            logger.exception('Editor context is invalid')
            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Check that the editor assessment order contains all the assessments.
        current_order = set(data['editor_assessments_order'])
        if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != current_order:
            # Backwards compatibility: "staff-assessment" may not be present.
            # If that is the only problem with this data, just add it manually and continue.
            if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) == current_order | {'staff-assessment'}:
                data['editor_assessments_order'].append('staff-assessment')
                logger.info('Backwards compatibility: editor_assessments_order now contains staff-assessment')
            else:
                logger.exception('editor_assessments_order does not contain all expected assessment types')
                return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Reject combinations in which no response could ever be submitted:
        # at least one of text/file response must be enabled, and whichever
        # one remains enabled on its own must be required, not optional.
        if not data['text_response'] and not data['file_upload_response']:
            return {'success': False, 'msg': self._("Error: both text and file upload responses can't be disabled")}
        if not data['text_response'] and data['file_upload_response'] == 'optional':
            return {'success': False,
                    'msg': self._("Error: in case if text response is disabled file upload response must be required")}
        if not data['file_upload_response'] and data['text_response'] == 'optional':
            return {'success': False,
                    'msg': self._("Error: in case if file upload response is disabled text response must be required")}

        # Backwards compatibility: We used to treat "name" as both a user-facing label
        # and a unique identifier for criteria and options.
        # Now we treat "name" as a unique identifier, and we've added an additional "label"
        # field that we display to the user.
        # If the JavaScript editor sends us a criterion or option without a "name"
        # field, we should assign it a unique identifier.
        for criterion in data['criteria']:
            if 'name' not in criterion:
                criterion['name'] = uuid4().hex
            for option in criterion['options']:
                if 'name' not in option:
                    option['name'] = uuid4().hex

        # Run the full rubric/assessment validation before touching any field.
        xblock_validator = validator(self, self._)
        success, msg = xblock_validator(
            create_rubric_dict(data['prompts'], data['criteria']),
            data['assessments'],
            submission_start=data['submission_start'],
            submission_due=data['submission_due'],
            leaderboard_show=data['leaderboard_show']
        )
        if not success:
            return {'success': False, 'msg': self._('Validation error: {error}').format(error=msg)}

        # At this point, all the input data has been validated,
        # so we can safely modify the XBlock fields.
        self.title = data['title']
        self.display_name = data['title']
        self.prompts = data['prompts']
        self.prompts_type = data['prompts_type']
        self.rubric_criteria = data['criteria']
        self.rubric_assessments = data['assessments']
        self.editor_assessments_order = data['editor_assessments_order']
        self.rubric_feedback_prompt = data['feedback_prompt']
        self.rubric_feedback_default_text = data['feedback_default_text']
        self.submission_start = data['submission_start']
        self.submission_due = data['submission_due']
        self.text_response = data['text_response']
        self.file_upload_response = data['file_upload_response']
        # File-upload settings are cleared when file responses are disabled.
        if data['file_upload_response']:
            self.file_upload_type = data['file_upload_type']
            self.white_listed_file_types_string = data['white_listed_file_types']
        else:
            self.file_upload_type = None
            self.white_listed_file_types_string = None
        self.allow_latex = bool(data['allow_latex'])
        self.leaderboard_show = data['leaderboard_show']

        return {'success': True, 'msg': self._(u'Successfully updated OpenAssessment XBlock')}
Esempio n. 13
0
    def update_editor_context(self, data, suffix=''):
        """
        Update the XBlock's configuration.

        Args:
            data (dict): Data from the request; should have the format described
            in the editor schema.

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """

        # Validate and sanitize the data using a schema
        # If the data is invalid, this means something is wrong with
        # our JavaScript, so we log an exception.
        try:
            data = EDITOR_UPDATE_SCHEMA(data)
        except MultipleInvalid:
            logger.exception('Editor context is invalid')
            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Check that the editor assessment order contains all the assessments.  We are more flexible on example-based.
        if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != (set(data['editor_assessments_order']) - {'example-based-assessment'}):
            logger.exception('editor_assessments_order does not contain all expected assessment types')
            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Backwards compatibility: We used to treat "name" as both a user-facing label
        # and a unique identifier for criteria and options.
        # Now we treat "name" as a unique identifier, and we've added an additional "label"
        # field that we display to the user.
        # If the JavaScript editor sends us a criterion or option without a "name"
        # field, we should assign it a unique identifier.
        for criterion in data['criteria']:
            if 'name' not in criterion:
                criterion['name'] = uuid4().hex
            for option in criterion['options']:
                if 'name' not in option:
                    option['name'] = uuid4().hex

        # If example based assessment is enabled, we replace its XML definition with the dictionary
        # definition we expect for validation and storing.
        for assessment in data['assessments']:
            if assessment['name'] == 'example-based-assessment':
                try:
                    assessment['examples'] = parse_examples_from_xml_str(assessment['examples_xml'])
                except UpdateFromXmlError:
                    return {'success': False, 'msg': self._(
                        u'Validation error: There was an error in the XML definition of the '
                        u'examples provided by the user. Please correct the XML definition before saving.')
                    }
                except KeyError:
                    return {'success': False, 'msg': self._(
                        u'Validation error: No examples were provided for example based assessment.'
                    )}
                # This is where we default to EASE for problems which are edited in the GUI
                assessment['algorithm_id'] = 'ease'
            if assessment['name'] == 'student-training':
                # Normalize each training example answer into the multi-part format.
                for example in assessment['examples']:
                    example['answer'] = {'parts': [{'text': text} for text in example['answer']]}


        # Run the full rubric/assessment validation before touching any field.
        xblock_validator = validator(self, self._)
        success, msg = xblock_validator(
            create_rubric_dict(data['prompts'], data['criteria']),
            data['assessments'],
            submission_start=data['submission_start'],
            submission_due=data['submission_due'],
            leaderboard_show=data['leaderboard_show']
        )
        if not success:
            return {'success': False, 'msg': self._('Validation error: {error}').format(error=msg)}

        # At this point, all the input data has been validated,
        # so we can safely modify the XBlock fields.
        self.title = data['title']
        self.display_name = data['title']
        self.prompts = data['prompts']
        self.rubric_criteria = data['criteria']
        self.rubric_assessments = data['assessments']
        self.editor_assessments_order = data['editor_assessments_order']
        self.rubric_feedback_prompt = data['feedback_prompt']
        self.rubric_feedback_default_text = data['feedback_default_text']
        self.submission_start = data['submission_start']
        self.submission_due = data['submission_due']
        self.file_upload_type = data['file_upload_type']
        self.white_listed_file_types_string = data['white_listed_file_types']
        self.allow_latex = bool(data['allow_latex'])
        self.leaderboard_show = data['leaderboard_show']

        return {'success': True, 'msg': self._(u'Successfully updated OpenAssessment XBlock')}
Esempio n. 14
0
    def update_editor_context(self, data, suffix=''):
        """
        Update the XBlock's configuration.

        Args:
            data (dict): Data from the request; should have the format described
            in the editor schema.

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """
        # Validate and sanitize the data using a schema.
        # Invalid data here means something is wrong with our JavaScript,
        # so we log an exception.
        try:
            data = EDITOR_UPDATE_SCHEMA(data)
        except MultipleInvalid:
            logger.exception('Editor context is invalid')
            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Check that the editor assessment order contains all the assessments.  We are more flexible on example-based.
        required_order = set(DEFAULT_EDITOR_ASSESSMENTS_ORDER)
        provided_order = set(data['editor_assessments_order']) - {'example-based-assessment'}
        if required_order != provided_order:
            logger.exception('editor_assessments_order does not contain all expected assessment types')
            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Backwards compatibility: We used to treat "name" as both a user-facing label
        # and a unique identifier for criteria and options.
        # Now we treat "name" as a unique identifier, and we've added an additional "label"
        # field that we display to the user.
        # If the JavaScript editor sends us a criterion or option without a "name"
        # field, we should assign it a unique identifier.
        for criterion in data['criteria']:
            if 'name' not in criterion:
                criterion['name'] = uuid4().hex
            for option in criterion['options']:
                if 'name' not in option:
                    option['name'] = uuid4().hex

        # If example based assessment is enabled, we replace its XML definition
        # with the dictionary definition we expect for validation and storing.
        for assessment in data['assessments']:
            if assessment['name'] == 'example-based-assessment':
                try:
                    assessment['examples'] = parse_examples_from_xml_str(assessment['examples_xml'])
                except UpdateFromXmlError:
                    return {
                        'success': False,
                        'msg': self._(
                            u'Validation error: There was an error in the XML definition of the '
                            u'examples provided by the user. Please correct the XML definition before saving.'
                        ),
                    }
                except KeyError:
                    return {
                        'success': False,
                        'msg': self._(
                            u'Validation error: No examples were provided for example based assessment.'
                        ),
                    }
                # This is where we default to EASE for problems which are edited in the GUI
                assessment['algorithm_id'] = 'ease'
            if assessment['name'] == 'student-training':
                # Normalize each training example answer into the multi-part format.
                for example in assessment['examples']:
                    example['answer'] = {'parts': [{'text': text} for text in example['answer']]}

        xblock_validator = validator(self, self._)
        success, msg = xblock_validator(
            create_rubric_dict(data['prompts'], data['criteria']),
            data['assessments'],
            submission_start=data['submission_start'],
            submission_due=data['submission_due'],
            leaderboard_show=data['leaderboard_show'],
        )
        if not success:
            return {'success': False, 'msg': self._('Validation error: {error}').format(error=msg)}

        # At this point, all the input data has been validated,
        # so we can safely modify the XBlock fields.
        self.title = data['title']
        self.display_name = data['title']
        self.prompts = data['prompts']
        self.rubric_criteria = data['criteria']
        self.rubric_assessments = data['assessments']
        self.editor_assessments_order = data['editor_assessments_order']
        self.rubric_feedback_prompt = data['feedback_prompt']
        self.rubric_feedback_default_text = data['feedback_default_text']
        self.submission_start = data['submission_start']
        self.submission_due = data['submission_due']
        self.allow_file_upload = bool(data['allow_file_upload'])
        self.allow_latex = bool(data['allow_latex'])
        self.leaderboard_show = data['leaderboard_show']

        return {'success': True, 'msg': self._(u'Successfully updated OpenAssessment XBlock')}