Example No. 1
    def test_completed_and_past_due(self, xblock):
        # Simulate having completed self assessment
        # Even though the problem is closed, we should still see
        # that we completed the step.
        submission = xblock.create_submission(xblock.get_student_item_dict(),
                                              u"Đøɨn' ɨŧ ŧø đɇȺŧħ")
        self_api.create_assessment(
            submission['uuid'],
            xblock.get_student_item_dict()['student_id'], {
                u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ',
                u'Form': u'Fair'
            }, {}, "Good job!",
            create_rubric_dict(xblock.prompt, xblock.rubric_criteria))

        # This case probably isn't possible, because presumably when we create
        # the self-assessment, the workflow status will be "waiting" or "done".
        # We're checking it anyway to be overly defensive: if the user has made a self-assessment,
        # we ALWAYS show complete, even if the workflow tells us we still have status 'self'.
        self._assert_path_and_context(
            xblock,
            'openassessmentblock/self/oa_self_complete.html', {
                'self_due': datetime.datetime(2000, 1,
                                              1).replace(tzinfo=pytz.utc),
                'allow_latex': False
            },
            workflow_status='self',
            submission_uuid=submission['uuid'])
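
Every example in this listing routes its prompt (or list of prompts) and rubric criteria through create_rubric_dict before handing them to an assessment or validation API. As a minimal sketch, and an assumption rather than the library's actual code, the helper presumably just bundles the two arguments into the single rubric dictionary those APIs expect:

def create_rubric_dict(prompts, criteria):
    # Hypothetical sketch only: the real helper lives in the openassessment
    # code base, and the exact key names here are assumed for illustration.
    return {
        'prompts': prompts,
        'criteria': criteria,
    }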
Example No. 2
    def get_staff_path_and_context(self):
        """
        Gets the path and context for the staff section of the ORA XBlock.
        """
        context = {}
        path = 'openassessmentblock/staff_debug/staff_debug.html'

        student_item = self.get_student_item_dict()

        # We need to display the new-style locations in the course staff
        # info, even if we're using old-style locations internally,
        # so course staff can use the locations to delete student state.
        context['item_id'] = student_item["item_id"]

        # Calculate how many students are in each step of the workflow
        status_counts, num_submissions = self.get_workflow_status_counts()
        context['status_counts'] = status_counts
        context['num_submissions'] = num_submissions

        # Show the schedule training button if example based assessment is
        # configured, and the current user has admin privileges.
        example_based_assessment = self.get_assessment_module(
            'example-based-assessment')
        display_ai_staff_info = (self.is_admin
                                 and bool(example_based_assessment)
                                 and not self.in_studio_preview)
        context['display_schedule_training'] = display_ai_staff_info
        context['display_reschedule_unfinished_tasks'] = display_ai_staff_info
        if display_ai_staff_info:
            context['classifierset'] = ai_api.get_classifier_set_info(
                create_rubric_dict(self.prompts,
                                   self.rubric_criteria_with_labels),
                example_based_assessment['algorithm_id'],
                student_item['course_id'], student_item['item_id'])

        # Include release/due dates for each step in the problem
        context['step_dates'] = list()

        # Include Latex setting
        context['allow_latex'] = self.allow_latex

        steps = ['submission'] + self.assessment_steps
        for step in steps:

            if step == 'example-based-assessment':
                continue

            # Get the dates as a student would see them
            __, __, start_date, due_date = self.is_closed(step=step,
                                                          course_staff=False)

            context['step_dates'].append({
                'step':
                step,
                'start':
                start_date if start_date > DISTANT_PAST else None,
                'due':
                due_date if due_date < DISTANT_FUTURE else None,
            })
        return path, context
Example No. 3
    def schedule_training(self, data, suffix=''):  # pylint: disable=W0613
        """
        Schedule a new training task for example-based grading.
        """
        assessment = self.get_assessment_module('example-based-assessment')
        student_item_dict = self.get_student_item_dict()

        if assessment:
            examples = assessment["examples"]
            try:
                workflow_uuid = ai_api.train_classifiers(
                    create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
                    convert_training_examples_list_to_dict(examples),
                    student_item_dict.get('course_id'),
                    student_item_dict.get('item_id'),
                    assessment["algorithm_id"]
                )
                return {
                    'success': True,
                    'workflow_uuid': workflow_uuid,
                    'msg': self._(u"Training scheduled with new Workflow UUID: {uuid}".format(uuid=workflow_uuid))
                }
            except AIError as err:
                return {
                    'success': False,
                    'msg': self._(u"An error occurred scheduling classifier training: {error}".format(error=err))
                }

        else:
            return {
                'success': False,
                'msg': self._(u"Example Based Assessment is not configured for this location.")
            }
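
Whichever branch runs, schedule_training answers with a dict that always carries 'success' and 'msg', plus 'workflow_uuid' when scheduling succeeded. A hypothetical caller-side sketch of that contract (the names below are placeholders, not from the source):

def report_training_result(block, data):
    # Hypothetical usage sketch: `block` is assumed to be an OpenAssessment
    # block instance exposing schedule_training as shown above.
    result = block.schedule_training(data)
    if result['success']:
        print('Training scheduled, workflow UUID:', result['workflow_uuid'])
    else:
        print('Scheduling failed:', result['msg'])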
Example No. 4
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        """
        config = parse_from_xml(node)
        block = runtime.construct_xblock_from_class(cls, keys)

        xblock_validator = validator(block, block._, strict_post_release=False)
        xblock_validator(create_rubric_dict(config['prompt'],
                                            config['rubric_criteria']),
                         config['rubric_assessments'],
                         submission_start=config['submission_start'],
                         submission_due=config['submission_due'],
                         leaderboard_show=config['leaderboard_show'])

        block.rubric_criteria = config['rubric_criteria']
        block.rubric_feedback_prompt = config['rubric_feedback_prompt']
        block.rubric_feedback_default_text = config[
            'rubric_feedback_default_text']
        block.rubric_assessments = config['rubric_assessments']
        block.submission_start = config['submission_start']
        block.submission_due = config['submission_due']
        block.title = config['title']
        block.prompt = config['prompt']
        block.allow_file_upload = config['allow_file_upload']
        block.allow_latex = config['allow_latex']
        block.leaderboard_show = config['leaderboard_show']

        return block
Example No. 5
    def create_workflow(self, submission_uuid):
        """
        Create a new workflow for a student submission.

        Args:
            submission_uuid (str): The UUID of the submission to associate
                with the workflow.

        Returns:
            None

        """
        steps = self._create_step_list()
        ai_module = self.get_assessment_module('example-based-assessment')
        on_init_params = {
            'ai': {
                'rubric':
                create_rubric_dict(self.prompts,
                                   self.rubric_criteria_with_labels),
                'algorithm_id':
                ai_module["algorithm_id"] if ai_module else None
            }
        }
        workflow_api.create_workflow(submission_uuid,
                                     steps,
                                     on_init_params=on_init_params)
Example No. 6
 def test_open_completed_self_assessment(self, xblock):
     # Simulate the workflow being in the self assessment step
     # after we've created a self-assessment
     submission = xblock.create_submission(
         xblock.get_student_item_dict(), (u"Đøɨn' ɨŧ ŧø đɇȺŧħ 1", u"Đøɨn' ɨŧ ŧø đɇȺŧħ 2")
     )
     self_api.create_assessment(
         submission['uuid'],
         xblock.get_student_item_dict()['student_id'],
         {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
         {}, "Good job!",
         create_rubric_dict(xblock.prompts, xblock.rubric_criteria)
     )
     self._assert_path_and_context(
         xblock, 'openassessmentblock/self/oa_self_complete.html',
         {
             'allow_multiple_files': True,
             'allow_latex': False,
             'prompts_type': 'text',
             'user_timezone': pytz.utc,
             'user_language': 'en'
         },
         workflow_status='self',
         submission_uuid=submission['uuid']
     )
Example No. 7
    def test_completed_and_past_due(self, xblock):
        # Simulate having completed self assessment
        # Even though the problem is closed, we should still see
        # that we completed the step.
        submission = xblock.create_submission(
            xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
        )
        self_api.create_assessment(
            submission['uuid'],
            xblock.get_student_item_dict()['student_id'],
            {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
            {}, "Good job!",
            create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
        )

        # This case probably isn't possible, because presumably when we create
        # the self-assessment, the workflow status will be "waiting" or "done".
        # We're checking it anyway to be overly defensive: if the user has made a self-assessment,
        # we ALWAYS show complete, even if the workflow tells us we still have status 'self'.
        self._assert_path_and_context(
            xblock, 'openassessmentblock/self/oa_self_complete.html',
            {'self_due': datetime.datetime(2000, 1, 1).replace(tzinfo=pytz.utc), 'allow_latex': False},
            workflow_status='self',
            submission_uuid=submission['uuid']
        )
Example No. 8
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        """
        config = parse_from_xml(node)
        block = runtime.construct_xblock_from_class(cls, keys)

        xblock_validator = validator(block, block._, strict_post_release=False)
        xblock_validator(
            create_rubric_dict(config['prompts'], config['rubric_criteria']),
            config['rubric_assessments'],
            submission_start=config['submission_start'],
            submission_due=config['submission_due'],
            leaderboard_show=config['leaderboard_show']
        )

        block.rubric_criteria = config['rubric_criteria']
        block.rubric_feedback_prompt = config['rubric_feedback_prompt']
        block.rubric_feedback_default_text = config['rubric_feedback_default_text']
        block.rubric_assessments = config['rubric_assessments']
        block.submission_start = config['submission_start']
        block.submission_due = config['submission_due']
        block.title = config['title']
        block.prompts = config['prompts']
        block.allow_file_upload = config['allow_file_upload']
        block.file_upload_type = config['file_upload_type']
        block.white_listed_file_types_string = config['white_listed_file_types']
        block.allow_latex = config['allow_latex']
        block.leaderboard_show = config['leaderboard_show']

        return block
Example No. 9
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        """
        config = parse_from_xml(node)
        block = runtime.construct_xblock_from_class(cls, keys)

        xblock_validator = validator(block, block._, strict_post_release=False)
        xblock_validator(
            create_rubric_dict(config["prompt"], config["rubric_criteria"]),
            config["rubric_assessments"],
            submission_start=config["submission_start"],
            submission_due=config["submission_due"],
            leaderboard_show=config["leaderboard_show"],
        )

        block.rubric_criteria = config["rubric_criteria"]
        block.rubric_feedback_prompt = config["rubric_feedback_prompt"]
        block.rubric_assessments = config["rubric_assessments"]
        block.submission_start = config["submission_start"]
        block.submission_due = config["submission_due"]
        block.title = config["title"]
        block.prompt = config["prompt"]
        block.allow_file_upload = config["allow_file_upload"]
        block.leaderboard_show = config["leaderboard_show"]

        return block
Example No. 10
    def get_staff_path_and_context(self):
        """
        Gets the path and context for the staff section of the ORA XBlock.
        """
        context = {}
        path = 'openassessmentblock/staff_debug/staff_debug.html'

        student_item = self.get_student_item_dict()

        # We need to display the new-style locations in the course staff
        # info, even if we're using old-style locations internally,
        # so course staff can use the locations to delete student state.
        context['item_id'] = student_item["item_id"]

        # Calculate how many students are in each step of the workflow
        status_counts, num_submissions = self.get_workflow_status_counts()
        context['status_counts'] = status_counts
        context['num_submissions'] = num_submissions

        # Show the schedule training button if example based assessment is
        # configured, and the current user has admin privileges.
        example_based_assessment = self.get_assessment_module('example-based-assessment')
        display_ai_staff_info = (
            self.is_admin and
            bool(example_based_assessment) and
            not self.in_studio_preview
        )
        context['display_schedule_training'] = display_ai_staff_info
        context['display_reschedule_unfinished_tasks'] = display_ai_staff_info
        if display_ai_staff_info:
            context['classifierset'] = ai_api.get_classifier_set_info(
                create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
                example_based_assessment['algorithm_id'],
                student_item['course_id'],
                student_item['item_id']
            )

        # Include release/due dates for each step in the problem
        context['step_dates'] = list()

        # Include Latex setting
        context['allow_latex'] = self.allow_latex

        steps = ['submission'] + self.assessment_steps
        for step in steps:

            if step == 'example-based-assessment':
                continue

            # Get the dates as a student would see them
            __, __, start_date, due_date = self.is_closed(step=step, course_staff=False)

            context['step_dates'].append({
                'step': step,
                'start': start_date if start_date > DISTANT_PAST else None,
                'due': due_date if due_date < DISTANT_FUTURE else None,
            })
        return path, context
Example No. 11
    def submit_ora_test_data(self, course_id, submissions_config):
        """
        Run the submit action. For each specified submission, create the submission, create an assessment if specified,
        and create a lock if specified.
        """
        for ora_config in submissions_config:
            log.info('Creating test submissions for course %s', course_id)
            for submission_config in ora_config['submissions']:
                log.info("Creating submission for user %s",
                         submission_config['username'])
                student_item = self.student_item(submission_config['username'],
                                                 course_id,
                                                 ora_config['displayName'])
                # Submissions consist of username, a line break, and then some lorem
                text_response = submission_config[
                    'username'] + '\n' + generate_lorem_sentences()
                submission = sub_api.create_submission(
                    student_item, {'parts': [{
                        'text': text_response
                    }]})
                workflow_api.create_workflow(submission['uuid'], ['staff'])
                workflow_api.update_from_assessments(submission['uuid'], None)
                log.info("Created submission %s for user %s",
                         submission['uuid'], submission_config['username'])

                if submission_config['lockOwner']:
                    log.info("Creating lock on submission %s owned by %s",
                             submission['uuid'],
                             submission_config['lockOwner'])
                    SubmissionGradingLock.claim_submission_lock(
                        submission['uuid'], self.username_to_anonymous_user_id[
                            submission_config['lockOwner']])

                if submission_config['gradeData']:
                    grade_data = submission_config['gradeData']
                    log.info(
                        "Creating assessment from user %s for submission %s",
                        grade_data['gradedBy'], submission['uuid'])
                    block = self.display_name_to_block[
                        ora_config['displayName']]
                    rubric_dict = create_rubric_dict(
                        block.prompts, block.rubric_criteria_with_labels)
                    options_selected, criterion_feedback = self.api_format_criteria(
                        grade_data['criteria'], rubric_dict)
                    staff_api.create_assessment(
                        submission['uuid'],
                        self.username_to_anonymous_user_id[
                            grade_data['gradedBy']],
                        options_selected,
                        criterion_feedback,
                        grade_data['overallFeedback'],
                        rubric_dict,
                    )
                    workflow_api.update_from_assessments(
                        submission['uuid'], None)
Example No. 12
 def test_open_completed_self_assessment(self, xblock):
     # Simulate the workflow being in the self assessment step
     # after we've created a self-assessment
     submission = xblock.create_submission(xblock.get_student_item_dict(),
                                           u"Đøɨn' ɨŧ ŧø đɇȺŧħ")
     self_api.create_assessment(
         submission['uuid'],
         xblock.get_student_item_dict()['student_id'], {
             u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ',
             u'Form': u'Fair'
         }, {}, "Good job!",
         create_rubric_dict(xblock.prompt, xblock.rubric_criteria))
     self._assert_path_and_context(
         xblock,
         'openassessmentblock/self/oa_self_complete.html', {},
         workflow_status='self',
         submission_uuid=submission['uuid'])
Example No. 13
 def test_open_completed_self_assessment(self, xblock):
     # Simulate the workflow being in the self assessment step
     # after we've created a self-assessment
     submission = xblock.create_submission(
         xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
     )
     self_api.create_assessment(
         submission['uuid'],
         xblock.get_student_item_dict()['student_id'],
         {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
         {}, "Good job!",
         create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
     )
     self._assert_path_and_context(
         xblock, 'openassessmentblock/self/oa_self_complete.html', {'allow_latex': False},
         workflow_status='self',
         submission_uuid=submission['uuid']
     )
Example No. 14
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        """
        config = parse_from_xml(node)
        block = runtime.construct_xblock_from_class(cls, keys)

        xblock_validator = validator(block, block._, strict_post_release=False)
        xblock_validator(create_rubric_dict(config['prompts'],
                                            config['rubric_criteria']),
                         config['rubric_assessments'],
                         submission_start=config['submission_start'],
                         submission_due=config['submission_due'],
                         leaderboard_show=config['leaderboard_show'])

        block.rubric_criteria = config['rubric_criteria']
        block.rubric_feedback_prompt = config['rubric_feedback_prompt']
        block.rubric_feedback_default_text = config[
            'rubric_feedback_default_text']
        block.rubric_assessments = config['rubric_assessments']
        block.submission_start = config['submission_start']
        block.submission_due = config['submission_due']
        block.title = config['title']
        block.prompts = config['prompts']
        block.prompts_type = config['prompts_type']
        block.text_response = config['text_response']
        block.text_response_editor = config['text_response_editor']
        block.file_upload_response = config['file_upload_response']
        block.allow_file_upload = config['allow_file_upload']
        block.file_upload_type = config['file_upload_type']
        block.white_listed_file_types_string = config[
            'white_listed_file_types']
        block.allow_multiple_files = config['allow_multiple_files']
        block.allow_latex = config['allow_latex']
        block.leaderboard_show = config['leaderboard_show']
        block.group_access = config['group_access']
        block.teams_enabled = config['teams_enabled']
        block.selected_teamset_id = config['selected_teamset_id']
        block.show_rubric_during_response = config[
            'show_rubric_during_response']
        return block
Example No. 15
    def create_workflow(self, submission_uuid):
        """
        Create a new workflow for a student submission.

        Args:
            submission_uuid (str): The UUID of the submission to associate
                with the workflow.

        Returns:
            None

        """
        steps = self._create_step_list()
        ai_module = self.get_assessment_module('example-based-assessment')
        on_init_params = {
            'ai': {
                'rubric': create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
                'algorithm_id': ai_module["algorithm_id"] if ai_module else None
            }
        }
        workflow_api.create_workflow(submission_uuid, steps, on_init_params=on_init_params)
Example No. 16
    def update_editor_context(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Update the XBlock's configuration.

        Args:
            data (dict): Data from the request; should have the format described
            in the editor schema.

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """

        # Validate and sanitize the data using a schema
        # If the data is invalid, this means something is wrong with
        # our JavaScript, so we log an exception.
        try:
            data = EDITOR_UPDATE_SCHEMA(data)
        except MultipleInvalid:
            logger.exception('Editor context is invalid')
            return {
                'success': False,
                'msg': self._('Error updating XBlock configuration')
            }

        # Check that the editor assessment order contains all the assessments.
        current_order = set(data['editor_assessments_order'])
        if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != current_order:
            # Backwards compatibility: "staff-assessment" may not be present.
            # If that is the only problem with this data, just add it manually and continue.
            if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) == current_order | {
                    'staff-assessment'
            }:
                data['editor_assessments_order'].append('staff-assessment')
                logger.info(
                    'Backwards compatibility: editor_assessments_order now contains staff-assessment'
                )
            else:
                logger.exception(
                    'editor_assessments_order does not contain all expected assessment types'
                )
                return {
                    'success': False,
                    'msg': self._('Error updating XBlock configuration')
                }

        if not data['text_response'] and not data['file_upload_response']:
            return {
                'success':
                False,
                'msg':
                self.
                _("Error: both text and file upload responses can't be disabled"
                  )
            }
        if not data['text_response'] and data[
                'file_upload_response'] == 'optional':
            return {
                'success':
                False,
                'msg':
                self.
                _("Error: in case if text response is disabled file upload response must be required"
                  )
            }
        if not data['file_upload_response'] and data[
                'text_response'] == 'optional':
            return {
                'success':
                False,
                'msg':
                self.
                _("Error: in case if file upload response is disabled text response must be required"
                  )
            }

        # Backwards compatibility: We used to treat "name" as both a user-facing label
        # and a unique identifier for criteria and options.
        # Now we treat "name" as a unique identifier, and we've added an additional "label"
        # field that we display to the user.
        # If the JavaScript editor sends us a criterion or option without a "name"
        # field, we should assign it a unique identifier.
        for criterion in data['criteria']:
            if 'name' not in criterion:
                criterion['name'] = uuid4().hex
            for option in criterion['options']:
                if 'name' not in option:
                    option['name'] = uuid4().hex

        xblock_validator = validator(self, self._)
        success, msg = xblock_validator(
            create_rubric_dict(data['prompts'], data['criteria']),
            data['assessments'],
            submission_start=data['submission_start'],
            submission_due=data['submission_due'],
            leaderboard_show=data['leaderboard_show'])
        if not success:
            return {
                'success': False,
                'msg': self._('Validation error: {error}').format(error=msg)
            }

        # At this point, all the input data has been validated,
        # so we can safely modify the XBlock fields.
        self.title = data['title']
        self.display_name = data['title']
        self.prompts = data['prompts']
        self.prompts_type = data['prompts_type']
        self.rubric_criteria = data['criteria']
        self.rubric_assessments = data['assessments']
        self.editor_assessments_order = data['editor_assessments_order']
        self.rubric_feedback_prompt = data['feedback_prompt']
        self.rubric_feedback_default_text = data['feedback_default_text']
        self.submission_start = data['submission_start']
        self.submission_due = data['submission_due']
        self.text_response = data['text_response']
        self.file_upload_response = data['file_upload_response']
        if data['file_upload_response']:
            self.file_upload_type = data['file_upload_type']
            self.white_listed_file_types_string = data[
                'white_listed_file_types']
        else:
            self.file_upload_type = None
            self.white_listed_file_types_string = None
        self.allow_latex = bool(data['allow_latex'])
        self.leaderboard_show = data['leaderboard_show']

        return {
            'success': True,
            'msg': self._(u'Successfully updated OpenAssessment XBlock')
        }
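
Reading off the keys the handler above touches, a request payload needs roughly the fields below. The values are illustrative placeholders; the authoritative contract is whatever EDITOR_UPDATE_SCHEMA accepts, which this listing does not show:

# Illustrative payload sketch derived from the keys read above; the real
# schema is EDITOR_UPDATE_SCHEMA, and the values here are placeholders.
editor_payload = {
    'title': 'Sample open response problem',
    'prompts': [],                     # prompt definitions
    'prompts_type': 'text',
    'criteria': [],                    # rubric criteria with their options
    'assessments': [],                 # assessment step configurations
    'editor_assessments_order': [],
    'feedback_prompt': '',
    'feedback_default_text': '',
    'submission_start': None,
    'submission_due': None,
    'text_response': 'required',
    'file_upload_response': '',
    'file_upload_type': None,
    'white_listed_file_types': '',
    'allow_latex': False,
    'leaderboard_show': 0,
}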
Example No. 17
    def update_editor_context(self, data, suffix=''):
        """
        Update the XBlock's configuration.

        Args:
            data (dict): Data from the request; should have the format described
            in the editor schema.

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """

        # Validate and sanitize the data using a schema
        # If the data is invalid, this means something is wrong with
        # our JavaScript, so we log an exception.
        try:
            data = EDITOR_UPDATE_SCHEMA(data)
        except MultipleInvalid:
            logger.exception('Editor context is invalid')
            return {
                'success': False,
                'msg': self._('Error updating XBlock configuration')
            }

        # Check that the editor assessment order contains all the assessments.  We are more flexible on example-based.
        if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != (
                set(data['editor_assessments_order']) -
            {'example-based-assessment'}):
            logger.exception(
                'editor_assessments_order does not contain all expected assessment types'
            )
            return {
                'success': False,
                'msg': self._('Error updating XBlock configuration')
            }

        # Backwards compatibility: We used to treat "name" as both a user-facing label
        # and a unique identifier for criteria and options.
        # Now we treat "name" as a unique identifier, and we've added an additional "label"
        # field that we display to the user.
        # If the JavaScript editor sends us a criterion or option without a "name"
        # field, we should assign it a unique identifier.
        for criterion in data['criteria']:
            if 'name' not in criterion:
                criterion['name'] = uuid4().hex
            for option in criterion['options']:
                if 'name' not in option:
                    option['name'] = uuid4().hex

        # If example based assessment is enabled, we replace its XML definition with the dictionary
        # definition we expect for validation and storage.
        for assessment in data['assessments']:
            if assessment['name'] == 'example-based-assessment':
                try:
                    assessment['examples'] = parse_examples_from_xml_str(
                        assessment['examples_xml'])
                except UpdateFromXmlError:
                    return {
                        'success':
                        False,
                        'msg':
                        self.
                        _(u'Validation error: There was an error in the XML definition of the '
                          u'examples provided by the user. Please correct the XML definition before saving.'
                          )
                    }
                except KeyError:
                    return {
                        'success':
                        False,
                        'msg':
                        self.
                        _(u'Validation error: No examples were provided for example based assessment.'
                          )
                    }
                    # This is where we default to EASE for problems which are edited in the GUI
                assessment['algorithm_id'] = 'ease'
            if assessment['name'] == 'student-training':
                for example in assessment['examples']:
                    example['answer'] = {
                        'parts': [{
                            'text': text
                        } for text in example['answer']]
                    }

        xblock_validator = validator(self, self._)
        success, msg = xblock_validator(
            create_rubric_dict(data['prompts'], data['criteria']),
            data['assessments'],
            submission_start=data['submission_start'],
            submission_due=data['submission_due'],
            leaderboard_show=data['leaderboard_show'])
        if not success:
            return {
                'success': False,
                'msg': self._('Validation error: {error}').format(error=msg)
            }

        # At this point, all the input data has been validated,
        # so we can safely modify the XBlock fields.
        self.title = data['title']
        self.display_name = data['title']
        self.prompts = data['prompts']
        self.rubric_criteria = data['criteria']
        self.rubric_assessments = data['assessments']
        self.editor_assessments_order = data['editor_assessments_order']
        self.rubric_feedback_prompt = data['feedback_prompt']
        self.rubric_feedback_default_text = data['feedback_default_text']
        self.submission_start = data['submission_start']
        self.submission_due = data['submission_due']
        self.allow_file_upload = bool(data['allow_file_upload'])
        self.allow_latex = bool(data['allow_latex'])
        self.leaderboard_show = data['leaderboard_show']

        return {
            'success': True,
            'msg': self._(u'Successfully updated OpenAssessment XBlock')
        }
Example No. 18
    def update_editor_context(self, data, suffix=''):
        """
        Update the XBlock's configuration.

        Args:
            data (dict): Data from the request; should have the format described
            in the editor schema.

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """

        # Validate and sanitize the data using a schema
        # If the data is invalid, this means something is wrong with
        # our JavaScript, so we log an exception.
        try:
            data = EDITOR_UPDATE_SCHEMA(data)
        except MultipleInvalid:
            logger.exception('Editor context is invalid')
            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Check that the editor assessment order contains all the assessments.  We are more flexible on example-based.
        if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != (set(data['editor_assessments_order']) - {'example-based-assessment'}):
            logger.exception('editor_assessments_order does not contain all expected assessment types')
            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Backwards compatibility: We used to treat "name" as both a user-facing label
        # and a unique identifier for criteria and options.
        # Now we treat "name" as a unique identifier, and we've added an additional "label"
        # field that we display to the user.
        # If the JavaScript editor sends us a criterion or option without a "name"
        # field, we should assign it a unique identifier.
        for criterion in data['criteria']:
            if 'name' not in criterion:
                criterion['name'] = uuid4().hex
            for option in criterion['options']:
                if 'name' not in option:
                    option['name'] = uuid4().hex

        # If example based assessment is enabled, we replace its XML definition with the dictionary
        # definition we expect for validation and storage.
        for assessment in data['assessments']:
            if assessment['name'] == 'example-based-assessment':
                try:
                    assessment['examples'] = parse_examples_from_xml_str(assessment['examples_xml'])
                except UpdateFromXmlError:
                    return {'success': False, 'msg': self._(
                        u'Validation error: There was an error in the XML definition of the '
                        u'examples provided by the user. Please correct the XML definition before saving.')
                    }
                except KeyError:
                    return {'success': False, 'msg': self._(
                        u'Validation error: No examples were provided for example based assessment.'
                    )}
                    # This is where we default to EASE for problems which are edited in the GUI
                assessment['algorithm_id'] = 'ease'
            if assessment['name'] == 'student-training':
                for example in assessment['examples']:
                    example['answer'] = {'parts': [{'text': text} for text in example['answer']]}


        xblock_validator = validator(self, self._)
        success, msg = xblock_validator(
            create_rubric_dict(data['prompts'], data['criteria']),
            data['assessments'],
            submission_start=data['submission_start'],
            submission_due=data['submission_due'],
            leaderboard_show=data['leaderboard_show']
        )
        if not success:
            return {'success': False, 'msg': self._('Validation error: {error}').format(error=msg)}

        # At this point, all the input data has been validated,
        # so we can safely modify the XBlock fields.
        self.title = data['title']
        self.display_name = data['title']
        self.prompts = data['prompts']
        self.rubric_criteria = data['criteria']
        self.rubric_assessments = data['assessments']
        self.editor_assessments_order = data['editor_assessments_order']
        self.rubric_feedback_prompt = data['feedback_prompt']
        self.rubric_feedback_default_text = data['feedback_default_text']
        self.submission_start = data['submission_start']
        self.submission_due = data['submission_due']
        self.file_upload_type = data['file_upload_type']
        self.white_listed_file_types_string = data['white_listed_file_types']
        self.allow_latex = bool(data['allow_latex'])
        self.leaderboard_show = data['leaderboard_show']

        return {'success': True, 'msg': self._(u'Successfully updated OpenAssessment XBlock')}
Example No. 19
    def update_editor_context(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Update the XBlock's configuration.

        Args:
            data (dict): Data from the request; should have the format described
            in the editor schema.

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """

        # Validate and sanitize the data using a schema
        # If the data is invalid, this means something is wrong with
        # our JavaScript, so we log an exception.
        try:
            data = EDITOR_UPDATE_SCHEMA(data)
        except MultipleInvalid:
            logger.exception('Editor context is invalid')
            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Check that the editor assessment order contains all the assessments.
        current_order = set(data['editor_assessments_order'])
        if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != current_order:
            # Backwards compatibility: "staff-assessment" may not be present.
            # If that is the only problem with this data, just add it manually and continue.
            if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) == current_order | {'staff-assessment'}:
                data['editor_assessments_order'].append('staff-assessment')
                logger.info('Backwards compatibility: editor_assessments_order now contains staff-assessment')
            else:
                logger.exception('editor_assessments_order does not contain all expected assessment types')
                return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        if not data['text_response'] and not data['file_upload_response']:
            return {'success': False, 'msg': self._("Error: both text and file upload responses can't be disabled")}
        if not data['text_response'] and data['file_upload_response'] == 'optional':
            return {'success': False,
                    'msg': self._("Error: in case if text response is disabled file upload response must be required")}
        if not data['file_upload_response'] and data['text_response'] == 'optional':
            return {'success': False,
                    'msg': self._("Error: in case if file upload response is disabled text response must be required")}

        # Backwards compatibility: We used to treat "name" as both a user-facing label
        # and a unique identifier for criteria and options.
        # Now we treat "name" as a unique identifier, and we've added an additional "label"
        # field that we display to the user.
        # If the JavaScript editor sends us a criterion or option without a "name"
        # field, we should assign it a unique identifier.
        for criterion in data['criteria']:
            if 'name' not in criterion:
                criterion['name'] = uuid4().hex
            for option in criterion['options']:
                if 'name' not in option:
                    option['name'] = uuid4().hex

        xblock_validator = validator(self, self._)
        success, msg = xblock_validator(
            create_rubric_dict(data['prompts'], data['criteria']),
            data['assessments'],
            submission_start=data['submission_start'],
            submission_due=data['submission_due'],
            leaderboard_show=data['leaderboard_show']
        )
        if not success:
            return {'success': False, 'msg': self._('Validation error: {error}').format(error=msg)}

        # At this point, all the input data has been validated,
        # so we can safely modify the XBlock fields.
        self.title = data['title']
        self.display_name = data['title']
        self.prompts = data['prompts']
        self.prompts_type = data['prompts_type']
        self.rubric_criteria = data['criteria']
        self.rubric_assessments = data['assessments']
        self.editor_assessments_order = data['editor_assessments_order']
        self.rubric_feedback_prompt = data['feedback_prompt']
        self.rubric_feedback_default_text = data['feedback_default_text']
        self.submission_start = data['submission_start']
        self.submission_due = data['submission_due']
        self.text_response = data['text_response']
        self.file_upload_response = data['file_upload_response']
        if data['file_upload_response']:
            self.file_upload_type = data['file_upload_type']
            self.white_listed_file_types_string = data['white_listed_file_types']
        else:
            self.file_upload_type = None
            self.white_listed_file_types_string = None
        self.allow_latex = bool(data['allow_latex'])
        self.leaderboard_show = data['leaderboard_show']

        return {'success': True, 'msg': self._(u'Successfully updated OpenAssessment XBlock')}
Example No. 20
    def get_student_info_path_and_context(self, student_id):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_id (unicode): The ID of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps
        student_item = self.get_student_item_dict()
        scores = {}
        problem_closed = None

        if student_id:
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission_uuid = submissions[0]['uuid']
                submission = submissions[0]

                if 'file_key' in submission.get('answer', {}):
                    file_key = submission['answer']['file_key']

                    try:
                        submission['image_url'] = file_api.get_download_url(file_key)
                    except file_api.FileUploadError:
                        # Log the error, but do not prevent the rest of the student info
                        # from being displayed.
                        msg = (
                            u"Could not retrieve image URL for staff debug page.  "
                            u"The student ID is '{student_id}', and the file key is {file_key}"
                        ).format(student_id=student_id, file_key=file_key)
                        logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

            # Get the data we need for instructor override of the student's score
            rubric_dict = create_rubric_dict(self.prompt, self.rubric_criteria_with_labels)
            scores = peer_api.get_data_for_override_score(
                submission_uuid,
                student_item,
                rubric_dict,
            )
            problem_closed, dummy0, dummy1, dummy2 = self.is_closed(step='peer-assessment', course_staff=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
            'scores': scores,
            'problem_closed': problem_closed,
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context