Example #1
class ThirdMixin(XBlockMixin):
    """Test class for mixin ordering."""
    field = Integer(default=3)
Example #2
class McqsXBlock(XBlock, StudioEditableXBlockMixin):
    """
    Multiple Choice Questions XBlock
    """
    display_name = String(default='MCQS')
    block_name = String(default='MCQS')
    editable_fields = ('question', 'choices', 'correct_choice', 'hint')

    question = String(
        display_name='Question',
        default='Which of the following languages is more suited to a structured program?',
        scope=Scope.content,
        help='Question statement')
    choices = List(display_name='Choices',
                   default=['PL/1', 'FORTRAN', 'BASIC', 'PASCAL'],
                   scope=Scope.content,
                   help='Choices for MCQs')
    correct_choice = Integer(
        display_name='Correct Choice',
        default=4,
        scope=Scope.content,
        help='Index of the correct choice among the given choices. '
             'For example, if the third choice is correct, enter 3.')
    hint = String(display_name='Hint',
                  default='Think hard!',
                  scope=Scope.content,
                  help='Hint for the User')

    user_choice = Integer(default=None,
                          scope=Scope.user_state,
                          help='Index of choice selected by User')
    correct = Boolean(default=False,
                      scope=Scope.user_state,
                      help='User selection is correct or not')

    def resource_string(self, path):
        """
        Handy helper for getting resources from our kit.
        """
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def student_view(self, context=None):
        """
        The primary view of the McqsXBlock, shown to students
        when viewing courses.
        """
        if context is None:
            context = {}

        context.update({'self': self})

        html = Template(self.resource_string("static/html/mcqs.html")).render(
            Context(context))
        frag = Fragment(html)
        frag.add_css(self.resource_string("static/css/mcqs.css"))
        frag.add_javascript(self.resource_string("static/js/src/mcqs.js"))
        frag.initialize_js('McqsXBlock')
        return frag

    def validate_field_data(self, validation, data):
        """
        Perform validation on Studio submitted data
        """
        if not data.question.strip():
            validation.add(
                ValidationMessage(ValidationMessage.ERROR,
                                  u"Question is required."))

        # there must be two choices to choose from
        if not data.choices or len(data.choices) < 2:
            validation.add(
                ValidationMessage(ValidationMessage.ERROR,
                                  u"Please enter atleast two choices"))

        if data.correct_choice not in range(1, len(data.choices) + 1):
            validation.add(
                ValidationMessage(
                    ValidationMessage.ERROR,
                    u"Correct choice must be from 1 to {}".format(
                        len(data.choices))))

    @XBlock.json_handler
    def check_answer(self, data, suffix=''):
        """
        Check answer for submitted response
        """
        response = dict(correct=False)

        ans = int(data.get('ans', 0))

        # store user response
        self.user_choice = ans

        if ans == self.correct_choice:
            self.correct = True
            response['correct'] = True
        else:
            response['correct_choice'] = self.correct_choice

        return response

    @XBlock.json_handler
    def get_hint(self, data, suffix=''):
        """
        Give hint for the question
        """
        response = dict(hint=self.hint)

        return response

    @staticmethod
    def workbench_scenarios():
        """
        A canned scenario for display in the workbench.
        """
        return [
            ("McqsXBlock", """<mcqs/>
             """),
            ("Multiple McqsXBlock", """<vertical_demo>
                <mcqs/>
                <mcqs/>
                <mcqs/>
                </vertical_demo>
             """),
        ]
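For reference, a minimal standalone sketch of the comparison check_answer performs (grade_choice is a hypothetical helper, not part of the XBlock API): the submitted 1-based index is compared with correct_choice, and the correct index is echoed back when the answer is wrong.

def grade_choice(submitted_index, correct_index):
    """Sketch of the response payload check_answer sends back to the browser."""
    response = {'correct': submitted_index == correct_index}
    if not response['correct']:
        response['correct_choice'] = correct_index
    return response

assert grade_choice(4, 4) == {'correct': True}
assert grade_choice(2, 4) == {'correct': False, 'correct_choice': 4}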
Example #3
class MentoringBlock(BaseMentoringBlock, StudioContainerWithNestedXBlocksMixin,
                     StepParentMixin):
    """
    An XBlock providing mentoring capabilities

    Composed of text, answer input fields, and a set of MRQ/MCQ questions with advice.
    A set of conditions on the provided answers and MCQ/MRQ choices determines whether the
    student is a) shown mentoring advice and asked to revise their answer, or b) given the
    OK to continue.
    """
    # Content
    MENTORING_MODES = ('standard', 'assessment')
    mode = String(display_name=_("Mode"),
                  help=_("Mode of the mentoring. 'standard' or 'assessment'"),
                  default='standard',
                  scope=Scope.content,
                  values=MENTORING_MODES)
    followed_by = String(
        display_name=_("Followed by"),
        help=_("url_name of the step after the current mentoring block in workflow."),
        default=None,
        scope=Scope.content)
    enforce_dependency = Boolean(
        display_name=_("Enforce Dependency"),
        help=_("Should the next step be the current block to complete?"),
        default=False,
        scope=Scope.content,
        enforce_type=True)
    display_submit = Boolean(display_name=_("Show Submit Button"),
                             help=_("Allow submission of the current block?"),
                             default=True,
                             scope=Scope.content,
                             enforce_type=True)
    xml_content = String(
        display_name=_("XML content"),
        help=_("Not used for version 2. This field is here only to preserve the data needed to upgrade from v1 to v2."),
        default='',
        scope=Scope.content,
        multiline_editor=True)

    # Settings
    display_name = String(display_name=_("Title (Display name)"),
                          help=_("Title to display"),
                          default=_("Problem Builder"),
                          scope=Scope.settings)
    feedback_label = String(display_name=_("Feedback Header"),
                            help=_("Header for feedback messages"),
                            default=_("Feedback"),
                            scope=Scope.content)

    # User state
    attempted = Boolean(
        # Has the student attempted this mentoring step?
        default=False,
        scope=Scope.user_state
        # TODO: Does anything use this 'attempted' field? May want to delete it.
    )
    completed = Boolean(
        # Has the student completed this mentoring step?
        default=False,
        scope=Scope.user_state)
    step = Integer(
        # Keep track of the student assessment progress.
        default=0,
        scope=Scope.user_state,
        enforce_type=True)
    student_results = List(
        # Store results of student choices.
        default=[],
        scope=Scope.user_state)
    extended_feedback = Boolean(
        help=_("Show extended feedback details when all attempts are used up."),
        default=False,
        scope=Scope.content)

    # Global user state
    next_step = String(
        # url_name of the next step the student must complete (global to all blocks)
        default='mentoring_first',
        scope=Scope.preferences)

    editable_fields = ('display_name', 'followed_by', 'max_attempts',
                       'enforce_dependency', 'display_submit',
                       'feedback_label', 'weight', 'extended_feedback')

    @property
    def allowed_nested_blocks(self):
        """
        Returns a list of allowed nested XBlocks. Each item can be either
        * An XBlock class
        * A NestedXBlockSpec

        If an XBlock class is used, it is assumed that the block is enabled and allows multiple
        instances. A NestedXBlockSpec allows explicitly setting the enabled/disabled state, the
        reason for disabling (if any), and whether single or multiple instances are allowed.
        """
        additional_blocks = []
        try:
            from xmodule.video_module.video_module import VideoDescriptor
            additional_blocks.append(
                NestedXBlockSpec(VideoDescriptor,
                                 category='video',
                                 label=_(u"Video")))
        except ImportError:
            pass
        try:
            from imagemodal import ImageModal
            additional_blocks.append(
                NestedXBlockSpec(ImageModal,
                                 category='imagemodal',
                                 label=_(u"Image Modal")))
        except ImportError:
            pass

        message_block_shims = [
            NestedXBlockSpec(
                MentoringMessageBlock,
                category='pb-message',
                boilerplate=message_type,
                label=get_message_label(message_type),
            ) for message_type in (
                'completed',
                'incomplete',
                'max_attempts_reached',
            )
        ]

        if self.is_assessment:
            message_block_shims.append(
                NestedXBlockSpec(
                    MentoringMessageBlock,
                    category='pb-message',
                    boilerplate='on-assessment-review',
                    label=get_message_label('on-assessment-review'),
                ))

        return [
            NestedXBlockSpec(AnswerBlock, boilerplate='studio_default'),
            MCQBlock, RatingBlock, MRQBlock, CompletionBlock,
            NestedXBlockSpec(None, category="html", label=self._("HTML")),
            AnswerRecapBlock, MentoringTableBlock, PlotBlock, SliderBlock
        ] + additional_blocks + message_block_shims

    @property
    def is_assessment(self):
        """ Checks if mentoring XBlock is in assessment mode """
        return self.mode == 'assessment'

    def get_question_number(self, question_id):
        """
        Get the step number of the question id
        """
        for child_id in self.children:
            question = self.runtime.get_block(child_id)
            if isinstance(question, QuestionMixin) and question.name == question_id:
                return question.step_number
        raise ValueError(
            "Question ID in answer set not a step of this Mentoring Block!")

    def answer_mapper(self, answer_status):
        """
        Create a JSON-dumpable object with readable key names from a list of student answers.
        """
        answer_map = []
        for answer in self.student_results:
            if answer[1]['status'] == answer_status:
                try:
                    answer_map.append({
                        'number': self.get_question_number(answer[0]),
                        'id': answer[0],
                        'details': answer[1],
                    })
                except ValueError:
                    pass  # The question has been deleted since the student answered it.
        return answer_map

    @property
    def score(self):
        """Compute the student score taking into account the weight of each step."""
        steps = self.steps
        steps_map = {q.name: q for q in steps}
        total_child_weight = sum(float(step.weight) for step in steps)
        if total_child_weight == 0:
            return Score(0, 0, [], [], [])
        points_earned = 0
        for q_name, q_details in self.student_results:
            question = steps_map.get(q_name)
            if question:
                points_earned += q_details['score'] * question.weight
        score = points_earned / total_child_weight
        correct = self.answer_mapper(CORRECT)
        incorrect = self.answer_mapper(INCORRECT)
        partially_correct = self.answer_mapper(PARTIAL)

        return Score(score, int(round(score * 100)), correct, incorrect,
                     partially_correct)

    def student_view(self, context):
        from .questionnaire import QuestionnaireAbstractBlock  # Import here to avoid circular dependency

        # Migrate stored data if necessary
        self.migrate_fields()

        # Validate self.step:
        num_steps = len(self.steps)
        if self.step > num_steps:
            self.step = num_steps

        fragment = Fragment()
        child_content = u""

        mcq_hide_previous_answer = self.get_option(
            'pb_mcq_hide_previous_answer')

        for child_id in self.children:
            child = self.runtime.get_block(child_id)
            if child is None:  # child should not be None but it can happen due to bugs or permission issues
                child_content += u"<p>[{}]</p>".format(
                    self._(u"Error: Unable to load child component."))
            elif not isinstance(child, MentoringMessageBlock):
                try:
                    if self.is_assessment and isinstance(child, QuestionMixin):
                        child_fragment = child.render('assessment_step_view',
                                                      context)
                    else:
                        context['hide_prev_answer'] = bool(
                            mcq_hide_previous_answer
                            and isinstance(child, QuestionnaireAbstractBlock))
                        child_fragment = child.render('mentoring_view', context)
                except NoSuchViewError:
                    if child.scope_ids.block_type == 'html' and getattr(
                            self.runtime, 'is_author_mode', False):
                        # html block doesn't support mentoring_view, and if we use student_view Studio will wrap
                        # it in HTML that we don't want in the preview. So just render its HTML directly:
                        child_fragment = Fragment(child.data)
                    else:
                        child_fragment = child.render('student_view', context)
                fragment.add_frag_resources(child_fragment)
                child_content += child_fragment.content

        fragment.add_content(
            loader.render_template('templates/html/mentoring.html', {
                'self': self,
                'title': self.display_name,
                'show_title': self.show_title,
                'child_content': child_content,
                'missing_dependency_url': self.has_missing_dependency and self.next_step_url,
            }))
        fragment.add_css_url(
            self.runtime.local_resource_url(self,
                                            'public/css/problem-builder.css'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(
                self, 'public/js/vendor/underscore-min.js'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/util.js'))
        js_file = 'public/js/mentoring_{}_view.js'.format(
            'assessment' if self.is_assessment else 'standard')
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, js_file))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/mentoring.js'))
        fragment.add_resource(
            loader.load_unicode('templates/html/mentoring_attempts.html'),
            "text/html")
        if self.is_assessment:
            fragment.add_resource(
                loader.load_unicode(
                    'templates/html/mentoring_assessment_templates.html'),
                "text/html")

        self.include_theme_files(fragment)
        # Workbench doesn't have font awesome, so add it:
        if WorkbenchRuntime and isinstance(self.runtime, WorkbenchRuntime):
            fragment.add_css_url(
                '//maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css'
            )

        fragment.initialize_js('MentoringBlock')

        if not self.display_submit:
            self.runtime.publish(self, 'progress', {})

        return fragment

    def migrate_fields(self):
        """
        Migrate data stored in the fields, when a format change breaks backward-compatibility with
        previous data formats
        """
        # Partial answers replaced the `completed` with `status` in `self.student_results`
        if self.student_results and 'completed' in self.student_results[0][1]:
            # Rename the field and use the new value format (text instead of boolean)
            for result in self.student_results:
                result[1]['status'] = 'correct' if result[1]['completed'] else 'incorrect'
                del result[1]['completed']

    @property
    def additional_publish_event_data(self):
        return {
            'user_id': self.scope_ids.user_id,
            'component_id': self.url_name,
        }

    @property
    def has_missing_dependency(self):
        """
        Returns True if the student needs to complete another step before being able to complete
        the current one, and False otherwise
        """
        return self.enforce_dependency and (not self.completed) and (
            self.next_step != self.url_name)

    @property
    def next_step_url(self):
        """
        Returns the URL of the next step's page
        """
        return '/jump_to_id/{}'.format(self.next_step)

    @property
    def hide_feedback(self):
        return self.get_option("pb_hide_feedback_if_attempts_remain"
                               ) and not self.max_attempts_reached

    def get_message(self, completed):
        """
        Get the message to display to a student following a submission in normal mode.
        """
        if completed:
            # Student has achieved a perfect score
            return self.get_message_content('completed')
        elif self.max_attempts_reached:
            # Student has not achieved a perfect score and cannot try again
            return self.get_message_content('max_attempts_reached')
        else:
            # Student did not achieve a perfect score but can try again:
            return self.get_message_content('incomplete')

    @property
    def assessment_message(self):
        """
        Get the message to display to a student following a submission in assessment mode.
        """
        if not self.max_attempts_reached:
            return self.get_message_content('on-assessment-review',
                                            or_default=True)
        else:
            return None

    @property
    def review_tips(self):
        """ Get review tips, shown for wrong answers in assessment mode. """
        if not self.is_assessment or self.step != len(self.step_ids):
            # Review tips are only used in assessment mode, and only on the last step.
            return []
        review_tips = []
        status_cache = dict(self.student_results)
        for child in self.steps:
            result = status_cache.get(child.name)
            if result and result.get('status') != 'correct':
                # The student got this wrong. Check if there is a review tip to show.
                tip_html = child.get_review_tip()
                if tip_html:
                    if getattr(self.runtime, 'replace_jump_to_id_urls',
                               None) is not None:
                        tip_html = self.runtime.replace_jump_to_id_urls(
                            tip_html)
                    review_tips.append(tip_html)
        return review_tips

    def show_extended_feedback(self):
        return self.extended_feedback and self.max_attempts_reached

    @XBlock.json_handler
    def get_results(self, queries, suffix=''):
        """
        Gets detailed results in the case of extended feedback.

        Right now there are two ways to get results-- through the template upon loading up
        the mentoring block, or after submission of an AJAX request like in
        submit or get_results here.
        """
        if self.mode == 'standard':
            results, completed, show_message = self._get_standard_results()
            mentoring_completed = completed
        else:
            if not self.show_extended_feedback():
                return {
                    'results': [],
                    'error': 'Extended feedback results cannot be obtained.'
                }

            results, completed, show_message = self._get_assessment_results(
                queries)
            mentoring_completed = True

        result = {
            'results': results,
            'completed': completed,
            'step': self.step,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
        }

        if show_message:
            result['message'] = self.get_message(mentoring_completed)

        return result

    def _get_standard_results(self):
        """
        Gets previous submissions results as if submit was called with exactly the same values as last time.
        """
        results = []
        completed = True
        show_message = (not self.hide_feedback) and bool(self.student_results)

        # In standard mode, all children are visible simultaneously, so need to collect results for all of them
        for child in self.steps:
            child_result = child.get_last_result()
            results.append([child.name, child_result])
            completed = completed and (child_result.get('status', None) == 'correct')

        return results, completed, show_message

    def _get_assessment_results(self, queries):
        """
        Gets detailed results in the case of extended feedback.

        It may be a good idea to eventually have this function get results
        in the general case instead of loading them in the template in the future,
        and only using it for extended feedback situations.

        Right now there are two ways to get results-- through the template upon loading up
        the mentoring block, or after submission of an AJAX request like in
        submit or get_results here.
        """
        results = []
        completed = True
        choices = dict(self.student_results)
        # Only one child should ever be of concern with this method.
        for child in self.steps:
            if child.name and child.name in queries:
                results = [child.name, child.get_results(choices[child.name])]
                # Children may have their own definition of 'completed' which can vary from the general case
                # of the whole mentoring block being completed. This is because in standard mode, all children
                # must be correct to complete the block. In assessment mode with extended feedback, completion
                # happens when you're out of attempts, no matter how you did.
                completed = choices[child.name]['status']
                break

        return results, completed, True

    @XBlock.json_handler
    def submit(self, submissions, suffix=''):
        log.info(u'Received submissions: {}'.format(submissions))
        # server-side check that the user is allowed to submit:
        if self.max_attempts_reached:
            raise JsonHandlerError(
                403, "Maximum number of attempts already reached.")
        elif self.has_missing_dependency:
            raise JsonHandlerError(
                403,
                "You need to complete all previous steps before being able to complete the current one."
            )

        # This has now been attempted:
        self.attempted = True

        if self.is_assessment:
            return self.handle_assessment_submit(submissions, suffix)

        submit_results = []
        previously_completed = self.completed
        completed = True
        for child in self.steps:
            if child.name and child.name in submissions:
                submission = submissions[child.name]
                child_result = child.submit(submission)
                submit_results.append([child.name, child_result])
                child.save()
                completed = completed and (child_result['status'] == 'correct')

        if completed and self.next_step == self.url_name:
            self.next_step = self.followed_by

        # Update the score and attempts, unless the user had already achieved a perfect score ("completed"):
        if not previously_completed:
            # Update the results
            while self.student_results:
                self.student_results.pop()
            for result in submit_results:
                self.student_results.append(result)

            # Save the user's latest score
            self.runtime.publish(self, 'grade', {
                'value': self.score.raw,
                'max_value': self.max_score(),
            })

            # Mark this as having used an attempt:
            if self.max_attempts > 0:
                self.num_attempts += 1

        # Save the completion status.
        # Once it has been completed once, keep completion even if user changes values
        self.completed = bool(completed) or previously_completed

        message = self.get_message(completed)
        raw_score = self.score.raw

        self.runtime.publish(
            self, 'xblock.problem_builder.submitted', {
                'num_attempts': self.num_attempts,
                'submitted_answer': submissions,
                'grade': raw_score,
            })

        return {
            'results': submit_results,
            'completed': self.completed,
            'message': message,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
        }

    def feedback_dispatch(self, target_data, stringify):
        if self.show_extended_feedback():
            if stringify:
                return json.dumps(target_data)
            else:
                return target_data

    def correct_json(self, stringify=True):
        return self.feedback_dispatch(self.score.correct, stringify)

    def incorrect_json(self, stringify=True):
        return self.feedback_dispatch(self.score.incorrect, stringify)

    def partial_json(self, stringify=True):
        return self.feedback_dispatch(self.score.partially_correct, stringify)

    def handle_assessment_submit(self, submissions, suffix):
        completed = False
        current_child = None
        children = [
            self.runtime.get_block(child_id) for child_id in self.children
        ]
        children = [
            child for child in children
            if not isinstance(child, MentoringMessageBlock)
        ]
        # The following is faster than the self.step_ids property
        steps = [
            child for child in children if isinstance(child, QuestionMixin)
        ]
        assessment_message = None
        review_tips = []

        for child in children:
            if child.name and child.name in submissions:
                submission = submissions[child.name]

                # Assessment mode doesn't allow to modify answers
                # This will get the student back at the step he should be
                current_child = child
                step = steps.index(child)
                if self.step > step or self.max_attempts_reached:
                    step = self.step
                    completed = False
                    break

                self.step = step + 1

                child_result = child.submit(submission)
                if 'tips' in child_result:
                    del child_result['tips']
                self.student_results.append([child.name, child_result])
                completed = child_result['status']

        event_data = {}

        score = self.score

        if current_child == steps[-1]:
            log.info(u'Last assessment step submitted: {}'.format(submissions))
            self.runtime.publish(
                self, 'grade', {
                    'value': score.raw,
                    'max_value': self.max_score(),
                    'score_type': 'proficiency',
                })
            event_data['final_grade'] = score.raw
            assessment_message = self.assessment_message
            review_tips = self.review_tips

            self.num_attempts += 1
            self.completed = True

        event_data['exercise_id'] = current_child.name
        event_data['num_attempts'] = self.num_attempts
        event_data['submitted_answer'] = submissions

        self.runtime.publish(self,
                             'xblock.problem_builder.assessment.submitted',
                             event_data)

        return {
            'completed': completed,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
            'step': self.step,
            'score': score.percentage,
            'correct_answer': len(score.correct),
            'incorrect_answer': len(score.incorrect),
            'partially_correct_answer': len(score.partially_correct),
            'correct': self.correct_json(stringify=False),
            'incorrect': self.incorrect_json(stringify=False),
            'partial': self.partial_json(stringify=False),
            'extended_feedback': self.show_extended_feedback() or '',
            'assessment_message': assessment_message,
            'assessment_review_tips': review_tips,
        }

    @XBlock.json_handler
    def try_again(self, data, suffix=''):

        if self.max_attempts_reached:
            return {'result': 'error', 'message': 'max attempts reached'}

        # reset
        self.step = 0
        self.completed = False

        while self.student_results:
            self.student_results.pop()

        return {'result': 'success'}

    def validate(self):
        """
        Validates the state of this XBlock except for individual field values.
        """
        validation = super(MentoringBlock, self).validate()
        a_child_has_issues = False
        message_types_present = set()
        for child_id in self.children:
            child = self.runtime.get_block(child_id)
            # Check if the child has any errors:
            if not child.validate().empty:
                a_child_has_issues = True
            # Ensure there is only one "message" block of each type:
            if isinstance(child, MentoringMessageBlock):
                msg_type = child.type
                if msg_type in message_types_present:
                    validation.add(
                        ValidationMessage(
                            ValidationMessage.ERROR,
                            self._(u"There should only be one '{msg_type}' message component.")
                            .format(msg_type=msg_type)))
                message_types_present.add(msg_type)
        if a_child_has_issues:
            validation.add(
                ValidationMessage(
                    ValidationMessage.ERROR,
                    self._(u"A component inside this mentoring block has issues.")))
        return validation

    def author_edit_view(self, context):
        """
        Add some HTML to the author view that allows authors to add child blocks.
        """
        local_context = context.copy()
        local_context['author_edit_view'] = True
        fragment = super(MentoringBlock, self).author_edit_view(local_context)
        fragment.add_content(
            loader.render_template('templates/html/mentoring_url_name.html',
                                   {'url_name': self.url_name}))
        fragment.add_css_url(
            self.runtime.local_resource_url(self,
                                            'public/css/problem-builder.css'))
        fragment.add_css_url(
            self.runtime.local_resource_url(
                self, 'public/css/problem-builder-edit.css'))
        fragment.add_css_url(
            self.runtime.local_resource_url(
                self, 'public/css/problem-builder-tinymce-content.css'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/util.js'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self,
                                            'public/js/container_edit.js'))
        fragment.initialize_js('ProblemBuilderContainerEdit')

        return fragment

    @staticmethod
    def workbench_scenarios():
        """
        Scenarios displayed by the workbench, loaded from an external (private) repository.
        """
        return loader.load_scenarios_from_path('templates/xml')
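The score property above normalises weighted per-step scores. A self-contained sketch of that arithmetic (hypothetical names, not the block's API): each step contributes its score times its weight, divided by the total weight.

def weighted_score(step_weights, step_scores):
    """step_weights: {name: weight}; step_scores: {name: score in [0, 1]}."""
    total_weight = sum(float(weight) for weight in step_weights.values())
    if total_weight == 0:
        return 0.0
    earned = sum(step_scores.get(name, 0.0) * weight
                 for name, weight in step_weights.items())
    return earned / total_weight

# A step worth 3x as much pulls the overall score toward its result:
assert weighted_score({'q1': 1.0, 'q2': 3.0}, {'q1': 1.0, 'q2': 0.5}) == 0.625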
Example #4
class CoderXBlock(XBlock):
    """
    TO-DO: document what your XBlock does.
    """

    # Fields are defined on the class.  You can access them in your code as
    # self.<fieldname>.

    # TO-DO: delete count, and define your own fields.
    count = Integer(
        default=0, scope=Scope.user_state,
        help="A simple counter, to show something happening",
    )

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    # TO-DO: change this view to display your data your own way.
    def student_view(self, context=None):
        """
        The primary view of the CoderXBlock, shown to students
        when viewing courses.
        """
        html = self.resource_string("static/html/codex.html")
        frag = Fragment(html.format(self=self, student_id=self.runtime.user_id))
        frag.add_css(self.resource_string("static/css/codex.css"))
        frag.add_javascript(self.resource_string("static/js/src/codex.js"))
        frag.initialize_js('CoderXBlock')
        return frag

    # TO-DO: change this handler to perform your own actions.  You may need more
    # than one handler, or you may not need any handlers at all.
    @XBlock.json_handler
    def increment_count(self, data, suffix=''):
        """
        An example handler, which increments the data.
        """
        # Just to show data coming in...
        assert data['hello'] == 'world'

        self.count += 1
        return {"count": self.count}

    # TO-DO: change this to create the scenarios you'd like to see in the
    # workbench while developing your XBlock.
    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("CoderXBlock",
             """<codex/>
             """),
            ("Multiple CoderXBlock",
             """<vertical_demo>
                <codex/>
                <codex/>
                <codex/>
                </vertical_demo>
             """),
        ]
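resource_string relies on pkg_resources to read files shipped inside the installed package. A hedged, generic version of the same pattern (the package name and path in the usage comment are placeholders):

import pkg_resources

def load_static_text(package, path):
    """Read a packaged resource and decode it as UTF-8, mirroring resource_string."""
    return pkg_resources.resource_string(package, path).decode("utf8")

# e.g. html = load_static_text("coder", "static/html/codex.html")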
Example #5
class ProctoringFields(object):
    """
    Fields that are specific to Proctored or Timed Exams
    """
    is_time_limited = Boolean(
        display_name=_("Is Time Limited"),
        help=_("This setting indicates whether students have a limited time"
               " to view or interact with this courseware component."),
        default=False,
        scope=Scope.settings,
    )

    default_time_limit_minutes = Integer(
        display_name=_("Time Limit in Minutes"),
        help=_("The number of minutes available to students for viewing or "
               "interacting with this courseware component."),
        default=None,
        scope=Scope.settings,
    )

    is_proctored_enabled = Boolean(
        display_name=_("Is Proctoring Enabled"),
        help=_(
            "This setting indicates whether this exam is a proctored exam."),
        default=False,
        scope=Scope.settings,
    )

    exam_review_rules = String(
        display_name=_("Software Secure Review Rules"),
        help=_("This setting indicates what rules the proctoring team should "
               "follow when viewing the videos."),
        default='',
        scope=Scope.settings,
    )

    is_practice_exam = Boolean(
        display_name=_("Is Practice Exam"),
        help=_("This setting indicates whether this exam is for testing purposes "
               "only. Practice exams are not verified."),
        default=False,
        scope=Scope.settings,
    )

    is_onboarding_exam = Boolean(
        display_name=_("Is Onboarding Exam"),
        help=_(
            "This setting indicates whether this exam is an onboarding exam."),
        default=False,
        scope=Scope.settings,
    )

    def _get_course(self):
        """
        Return course by course id.
        """
        return self.descriptor.runtime.modulestore.get_course(self.course_id)  # pylint: disable=no-member

    @property
    def is_timed_exam(self):
        """
        Alias the permutation of above fields that corresponds to un-proctored timed exams
        to the more clearly-named is_timed_exam
        """
        return not self.is_proctored_enabled and not self.is_practice_exam and self.is_time_limited

    @property
    def is_proctored_exam(self):
        """ Alias the is_proctored_enabled field to the more legible is_proctored_exam """
        return self.is_proctored_enabled

    @property
    def allow_proctoring_opt_out(self):
        """
        Returns true if the learner should be given the option to choose between
        taking a proctored exam, or opting out to take the exam without proctoring.
        """
        return self._get_course().allow_proctoring_opt_out

    @is_proctored_exam.setter
    def is_proctored_exam(self, value):
        """ Alias the is_proctored_enabled field to the more legible is_proctored_exam """
        self.is_proctored_enabled = value
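The is_timed_exam property combines three of the booleans above. A quick standalone sketch of that truth table:

def is_timed_exam(is_time_limited, is_proctored_enabled, is_practice_exam):
    # Plain timed exam: time-limited, but neither proctored nor a practice exam.
    return is_time_limited and not is_proctored_enabled and not is_practice_exam

assert is_timed_exam(True, False, False) is True
assert is_timed_exam(True, True, False) is False    # proctored exam
assert is_timed_exam(False, False, False) is False  # no time limit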
Example #6
class StudentMixin(object):
    """
    Student view mixin for the Audio XBlock
    """

    plays = Integer(default=0,
                    scope=Scope.user_state,
                    help="Number of times the sound was played")

    @property
    def configured(self):
        return bool(self.file_url)

    @property
    def can_play(self):
        return self.configured and (self.max_plays is None
                                    or self.plays < self.max_plays)

    def _get_sound_url(self):
        if self.can_play:
            return self.file_url

        return None

    def _get_state(self):
        return {
            'configured': self.configured,
            'plays': self.plays,
            'maxPlays': self.max_plays,
            'canPlay': self.can_play,
            'options': {
                'showControls': self.show_controls,
                'autoplay': self.autoplay
            }
        }

    def student_view(self, context=None):
        """
        The primary view of the AudioXBlock, shown to students
        when viewing courses.
        """

        template = render_template('templates/student.html')
        frag = Fragment(template)
        frag.add_css(load_resource('static/style/xblock-audio.min.css'))
        frag.add_javascript(load_resource('static/script/howler.min.js'))
        frag.add_javascript(load_resource('static/script/handlebars.min.js'))
        frag.add_javascript(load_resource('static/script/xblock-audio.min.js'))
        frag.initialize_js('AudioXBlockStudent')
        return frag

    @XBlock.json_handler
    def get_state(self, data, suffix=''):
        state = self._get_state()

        return {'state': state, 'success': True}

    @XBlock.json_handler
    def play(self, data, suffix=''):
        if self.can_play:
            url = self._get_sound_url()

            self.plays += 1

            return {'url': url, 'success': True}

        return {'success': False}
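can_play and the play handler gate playback on a counter and an optional limit. A standalone sketch of that check, using plain arguments instead of XBlock fields:

def can_play(configured, plays, max_plays):
    # max_plays of None means unlimited; otherwise plays must stay below the limit.
    return configured and (max_plays is None or plays < max_plays)

assert can_play(True, 5, None) is True    # unlimited plays
assert can_play(True, 2, 3) is True
assert can_play(True, 3, 3) is False      # limit reached
assert can_play(False, 0, None) is False  # no file configured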
Example #7
class IF_P_OR_Q_THEN_R_XBLOCK2CLASS(XBlock):
    """
    An XBlock providing CTAT tutors.
    """

    ### xBlock tag variables
    width = Integer(help="Width of the StatTutor frame.",
                    default=690,
                    scope=Scope.content)
    height = Integer(help="Height of the StatTutor frame.",
                     default=550,
                     scope=Scope.content)

    ### Grading variables
    has_score = Boolean(default=True, scope=Scope.content)
    icon_class = String(default="problem", scope=Scope.content)
    score = Integer(help="Current count of correctly completed student steps",
                    scope=Scope.user_state,
                    default=0)
    max_problem_steps = Integer(help="Total number of steps",
                                scope=Scope.user_state,
                                default=1)

    def max_score(self):
        """ The maximum raw score of the problem. """
        return 1  #self.max_problem_steps

    attempted = Boolean(help="True if at least one step has been completed",
                        scope=Scope.user_state,
                        default=False)
    completed = Boolean(
        help="True if all of the required steps are correctly completed",
        scope=Scope.user_state,
        default=False)
    weight = Float(
        display_name="Problem Weight",
        help=("Defines the number of points each problem is worth. "
              "If the value is not set, the problem is worth the sum of the "
              "option point values."),
        values={
            "min": 0,
            "step": .1
        },
        scope=Scope.settings)  # weight needs to be set to something

    ### Basic interface variables
    src = String(help="The source html file for CTAT interface.",
                 default="public/if_p_or_q_then_r.ed.html",
                 scope=Scope.settings)
    brd = String(help="The behavior graph.",
                 default="public/problem_files/if_p_or_q_then_r.brd",
                 scope=Scope.settings)

    ### CTATConfiguration variables
    log_name = String(help="Problem name to log",
                      default="CTATEdXProblem",
                      scope=Scope.settings)
    log_dataset = String(help="Dataset name to log",
                         default="edxdataset",
                         scope=Scope.settings)
    log_level1 = String(help="Level name to log",
                        default="unit1",
                        scope=Scope.settings)
    log_type1 = String(help="Level type to log",
                       default="unit",
                       scope=Scope.settings)
    log_level2 = String(help="Level name to log",
                        default="unit2",
                        scope=Scope.settings)
    log_type2 = String(help="Level type to log",
                       default="unit",
                       scope=Scope.settings)
    log_url = String(help="URL of the logging service",
                     default="http://pslc-qa.andrew.cmu.edu/log/server",
                     scope=Scope.settings)
    logtype = String(help="How should data be logged",
                     default="clienttologserver",
                     scope=Scope.settings)
    log_diskdir = String(
        help="Directory for log files relative to the tutoring service",
        default=".",
        scope=Scope.settings)
    log_port = String(help="Port used by the tutoring service",
                      default="8080",
                      scope=Scope.settings)
    log_remoteurl = String(
        help="Location of the tutoring service (localhost or domain name)",
        default="localhost",
        scope=Scope.settings)

    ctat_connection = String(help="",
                             default="javascript",
                             scope=Scope.settings)

    ### user information
    saveandrestore = String(help="Internal data blob used by the tracer",
                            default="",
                            scope=Scope.user_state)
    skillstring = String(help="Internal data blob used by the tracer",
                         default="",
                         scope=Scope.user_info)

    def logdebug(self, aMessage):
        global dbgopen, tmp_file
        if (dbgopen == False):
            tmp_file = open("/tmp/edx-tmp-log-ctat.txt", "a", 0)
            dbgopen = True
        tmp_file.write(aMessage + "\n")

    def resource_string(self, path):
        """ Read in the contents of a resource file. """
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def strip_local(self, url):
        """ Returns the given url with //localhost:port removed. """
        return re.sub(r'//localhost(:\d*)?', '', url)

    def get_local_resource_url(self, url):
        """ Wrapper for self.runtime.local_resource_url. """
        return self.strip_local(self.runtime.local_resource_url(self, url))

    # -------------------------------------------------------------------
    # TO-DO: change this view to display your data your own way.
    # -------------------------------------------------------------------

    def student_view(self, context=None):
        """
        Create a Fragment used to display a CTAT StatTutor xBlock to a student.

        Returns a Fragment object containing the HTML to display
        """
        # read in template html
        html = self.resource_string("static/html/ctatxblock.html")
        frag = Fragment(
            html.format(tutor_html=self.get_local_resource_url(self.src)))
        config = self.resource_string("static/js/CTATConfig.js")
        frag.add_javascript(
            config.format(
                self=self,
                tutor_html=self.get_local_resource_url(self.src),
                question_file=self.get_local_resource_url(self.brd),
                student_id=self.runtime.anonymous_student_id if hasattr(
                    self.runtime, 'anonymous_student_id') else 'bogus-sdk-id',
                guid=str(uuid.uuid4())))
        frag.add_javascript(
            self.resource_string("static/js/Initialize_CTATXBlock.js"))
        frag.initialize_js('Initialize_CTATXBlock')
        return frag

    @XBlock.json_handler
    def ctat_grade(self, data, suffix=''):
        #self.logdebug ("ctat_grade ()")
        #print('ctat_grade:',data,suffix)
        self.attempted = True
        self.score = int(data.get('value'))
        self.max_problem_steps = int(data.get('max_value'))
        self.completed = self.score >= self.max_problem_steps
        scaled = float(self.score) / float(self.max_problem_steps)
        # trying with max of 1.
        event_data = {'value': scaled, 'max_value': 1}
        self.runtime.publish(self, 'grade', event_data)
        return {
            'result': 'success',
            'finished': self.completed,
            'score': scaled
        }

    # -------------------------------------------------------------------
    # TO-DO: change this view to display your data your own way.
    # -------------------------------------------------------------------
    def studio_view(self, context=None):
        html = self.resource_string("static/html/ctatstudio.html")
        frag = Fragment(html.format(self=self))
        js = self.resource_string("static/js/ctatstudio.js")
        frag.add_javascript(unicode(js))
        frag.initialize_js('CTATXBlockStudio')
        return frag

    @XBlock.json_handler
    def studio_submit(self, data, suffix=''):
        """
        Called when submitting the form in Studio.
        """
        self.src = data.get('src')
        self.brd = data.get('brd')
        self.width = data.get('width')
        self.height = data.get('height')
        return {'result': 'success'}

    @XBlock.json_handler
    def ctat_save_problem_state(self, data, suffix=''):
        """Called from CTATLMS.saveProblemState."""
        if data.get('state') is not None:
            self.saveandrestore = data.get('state')
            return {'result': 'success'}
        return {'result': 'failure'}

    @XBlock.json_handler
    def ctat_get_problem_state(self, data, suffix=''):
        return {'result': 'success', 'state': self.saveandrestore}

    @XBlock.json_handler
    def ctat_set_variable(self, data, suffix=''):
        self.logdebug("ctat_set_variable ()")

        for key in data:
            #value = base64.b64decode(data[key])
            value = data[key]
            self.logdebug("Setting ({}) to ({})".format(key, value))
            if (key == "href"):
                self.href = value
            elif (key == "ctatmodule"):
                self.ctatmodule = value
            elif (key == "problem"):
                self.problem = value
            elif (key == "dataset"):
                self.dataset = value
            elif (key == "level1"):
                self.level1 = value
            elif (key == "type1"):
                self.type1 = value
            elif (key == "level2"):
                self.level2 = value
            elif (key == "type2"):
                self.type2 = value
            elif (key == "logurl"):
                self.logurl = value
            elif (key == "logtype"):
                self.logtype = value
            elif (key == "diskdir"):
                self.diskdir = value
            elif (key == "port"):
                self.port = value
            elif (key == "remoteurl"):
                self.remoteurl = value
            elif (key == "connection"):
                self.connection = value
            #elif (key=="src"):
            #   self.src = value
            elif (key == "saveandrestore"):
                self.logdebug("Received saveandrestore request")
                self.saveandrestore = value
            #elif (key=="skillstring"):
            #  self.skillstring = value

        return {'result': 'success'}

    # -------------------------------------------------------------------
    # TO-DO: change this to create the scenarios you'd like to see in the
    # workbench while developing your XBlock.
    # -------------------------------------------------------------------
    @staticmethod
    def workbench_scenarios():
        return [
            ("IF_P_OR_Q_THEN_R_XBLOCK2CLASS", """<vertical_demo>
                <if_p_or_q_then_r_xblock2 width="" height=""/>
                </vertical_demo>
             """),
        ]
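ctat_grade scales the raw step count to a maximum of 1 before publishing the 'grade' event. A minimal sketch of that scaling (scale_grade is a hypothetical helper name):

def scale_grade(score, max_problem_steps):
    # Normalise correct steps against the total, reporting a max_value of 1.
    scaled = float(score) / float(max_problem_steps)
    return {'value': scaled, 'max_value': 1}

assert scale_grade(3, 4) == {'value': 0.75, 'max_value': 1}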
Example #8
def test_values_dict():
    # Test that the format expected for integers is allowed
    test_field = Integer(values={"min": 1, "max": 100})
    assert_equals({"min": 1, "max": 100}, test_field.values)
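A companion sketch, assuming the field API also accepts an explicit sequence of allowed values (as the MENTORING_MODES tuple passed to values in Example #3 suggests); the import path below is the standard xblock.fields module:

from xblock.fields import Integer

def test_values_sequence():
    test_field = Integer(values=[1, 2, 3])
    assert test_field.values == [1, 2, 3]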
Example #9
class StaffGradedAssignmentXBlock2(XBlock):
    """
    This block defines a Staff Graded Assignment.  Students are shown a rubric
    and invited to upload a file which is then graded by staff.
    """
    has_score = True
    icon_class = 'problem'

    display_name = String(
        display_name=_("display_name"),
        default=_('K-MOOC Staff Graded Assignment'),
        scope=Scope.settings,
        help=_("This name appears in the horizontal navigation "
               "at the top of the page."))

    pass_file = Boolean(display_name=_("pass file"),
                        help=("pass file"),
                        default=True,
                        scope=Scope.settings)

    weight = Float(
        display_name=_("Problem Weight"),
        help=_("Defines the number of points each problem is worth. "
               "If the value is not set, the problem is worth the sum of the "
               "option point values."),
        values={
            "min": 0,
            "step": .1
        },
        scope=Scope.settings)

    points = Integer(
        display_name=_("Maximum score"),
        help=_("Maximum grade score given to assignment by staff."),
        default=100,
        scope=Scope.settings)

    staff_score = Integer(
        display_name=_("Score assigned by non-instructor staff"),
        help=("Score will need to be approved by instructor before being "
              "published."),
        default=None,
        scope=Scope.settings)

    comment = String(display_name=_("Instructor comment"),
                     default='',
                     scope=Scope.user_state,
                     help="Feedback given to student by instructor.")

    annotated_sha1 = String(
        display_name=_("Annotated SHA1"),
        scope=Scope.user_state,
        default=None,
        help=("sha1 of the annotated file uploaded by the instructor for "
              "this assignment."))

    annotated_filename = String(
        display_name=_("Annotated file name"),
        scope=Scope.user_state,
        default=None,
        help="The name of the annotated file uploaded for this assignment.")

    annotated_mimetype = String(
        display_name=_("Mime type of annotated file"),
        scope=Scope.user_state,
        default=None,
        help="The mimetype of the annotated file uploaded for this assignment."
    )

    annotated_timestamp = DateTime(display_name=_("Timestamp"),
                                   scope=Scope.user_state,
                                   default=None,
                                   help="When the annotated file was uploaded")

    def max_score(self):
        return self.points

    @reify
    def block_id(self):
        # cargo culted gibberish
        return self.scope_ids.usage_id

    def student_submission_id(self, id=None):
        """
        Returns dict required by the submissions app for creating and
        retrieving submissions for a particular student.
        """
        if id is None:
            id = self.xmodule_runtime.anonymous_student_id
            assert id != 'MOCK', "Forgot to call 'personalize' in test."
        return {
            "student_id": id,
            "course_id": self.course_id,
            "item_id": self.block_id,
            "item_type": 'sga',  # ???
        }

    def get_submission(self, id=None):
        """
        Get student's most recent submission.
        """
        try:
            submissions = submissions_api.get_submissions(
                self.student_submission_id(id))
        except:
            submissions = None

        if submissions:
            # If I understand docs correctly, most recent submission should
            # be first
            return submissions[0]

    def get_score(self, id=None):
        """
        Get student's current score.
        """
        score = submissions_api.get_score(self.student_submission_id(id))
        if score:
            return score['points_earned']

    @reify
    def score(self):
        return self.get_score()

    def student_view(self, context=None):
        """
        The primary view of the StaffGradedAssignmentXBlock2, shown to students
        when viewing courses.
        """
        context = {
            "student_state": json.dumps(self.student_state()),
            "id": self.location.name.replace('.', '_'),
        }
        if self.show_staff_grading_interface():
            context['is_course_staff'] = True
            self.update_staff_debug_context(context)

        fragment = Fragment()

        fragment.add_content(
            render_template('templates/staff_graded_assignment/show.html',
                            context))
        fragment.add_css(_resource("static/css/edx_sga.css"))
        fragment.add_javascript(_resource("static/js/src/edx_sga.js"))
        fragment.initialize_js('StaffGradedAssignmentXBlock2')
        return fragment

    def update_staff_debug_context(self, context):
        published = self.start
        context['is_released'] = published and published < _now()
        context['location'] = self.location
        context['category'] = type(self).__name__
        context['fields'] = [(name, field.read_from(self))
                             for name, field in self.fields.items()]

    def student_state(self):
        """
        Returns a JSON serializable representation of student's state for
        rendering in client view.
        """
        submission = self.get_submission()
        if submission:
            uploaded = {"filename": submission['answer']['filename']}
        else:
            uploaded = None

        if self.annotated_sha1:
            annotated = {"filename": self.annotated_filename}
        else:
            annotated = None

        score = self.score
        if score is not None:
            graded = {'score': score, 'comment': self.comment}
        else:
            graded = None

        return {
            "uploaded": uploaded,
            "annotated": annotated,
            "graded": graded,
            "max_score": self.max_score(),
            "upload_allowed": self.upload_allowed(),
            "pass_file": self.pass_file,
            "display_name": str(self.display_name),
        }

    def staff_grading_data(self):
        def get_student_data():
            # Submissions doesn't have API for this, just use model directly
            students = SubmissionsStudent.objects.filter(
                course_id=self.course_id, item_id=self.block_id)
            for student in students:
                submission = self.get_submission(student.student_id)
                if not submission:
                    continue
                user = user_by_anonymous_id(student.student_id)
                module, _ = StudentModule.objects.get_or_create(
                    course_id=self.course_id,
                    module_state_key=self.location,
                    student=user,
                    defaults={
                        'state': '{}',
                        'module_type': self.category,
                    })
                state = json.loads(module.state)
                score = self.get_score(student.student_id)
                approved = score is not None
                if score is None:
                    score = state.get('staff_score')
                    needs_approval = score is not None
                else:
                    needs_approval = False
                instructor = self.is_instructor()
                yield {
                    'module_id': module.id,
                    'student_id': student.student_id,
                    'submission_id': submission['uuid'],
                    'username': module.student.username,
                    'fullname': module.student.profile.name,
                    'filename': submission['answer']["filename"],
                    'timestamp': submission['created_at'].strftime(
                        DateTime.DATETIME_FORMAT),
                    'score': score,
                    'approved': approved,
                    'needs_approval': instructor and needs_approval,
                    'may_grade': instructor or not approved,
                    'annotated': state.get("annotated_filename"),
                    'comment': state.get("comment", ''),
                }

        return {
            'assignments': list(get_student_data()),
            'max_score': self.max_score(),
        }

    def studio_view(self, context=None):
        try:
            cls = type(self)

            def none_to_empty(x):
                return x if x is not None else ''

            edit_fields = (
                (field, none_to_empty(getattr(self, field.name)), validator)
                for field, validator in (
                    (cls.display_name, 'string'),
                    # (cls.pass_file, 'boolean'),
                    (cls.points, 'number'),
                    # (cls.weight, 'number')
                ))

            context = {'fields': edit_fields}
            fragment = Fragment()
            fragment.add_content(
                render_template('templates/staff_graded_assignment/edit.html',
                                context))
            fragment.add_javascript(_resource("static/js/src/studio.js"))
            fragment.initialize_js('StaffGradedAssignmentXBlock2')
            return fragment
        except:  # pragma: NO COVER
            log.error("Don't swallow my exceptions", exc_info=True)
            raise

    @XBlock.json_handler
    def save_sga(self, data, suffix=''):
        self.display_name = data.get('display_name', self.display_name)
        self.weight = data.get('weight', self.weight)
        self.pass_file = data.get('pass_file', self.pass_file)

        # Validate points before saving
        points = data.get('points', self.points)
        # Check that we are an int
        try:
            points = int(points)
        except ValueError:
            raise JsonHandlerError(400, 'Points must be an integer')
        # Check that we are positive
        if points < 0:
            raise JsonHandlerError(400, 'Points must be a positive integer')
        self.points = points

    @XBlock.handler
    def upload_assignment(self, request, suffix=''):
        require(self.upload_allowed())
        answer = {
            "sha1": 'pass_file',
            "filename": '',
            "mimetype": '',
        }
        student_id = self.student_submission_id()
        submissions_api.create_submission(student_id, answer)
        return Response(json_body=self.student_state())

        # require(self.upload_allowed())
        # upload = request.params['assignment']
        # sha1 = _get_sha1(upload.file)
        # answer = {
        #     "sha1": sha1,
        #     "filename": upload.file.name,
        #     "mimetype": mimetypes.guess_type(upload.file.name)[0],
        # }
        # student_id = self.student_submission_id()
        # submissions_api.create_submission(student_id, answer)
        # path = self._file_storage_path(sha1, upload.file.name)
        # if not default_storage.exists(path):
        #     default_storage.save(path, File(upload.file))
        # return Response(json_body=self.student_state())

    @XBlock.handler
    def staff_upload_annotated(self, request, suffix=''):
        require(self.is_course_staff())
        upload = request.params['annotated']
        module = StudentModule.objects.get(pk=request.params['module_id'])
        state = json.loads(module.state)
        state['annotated_sha1'] = sha1 = _get_sha1(upload.file)
        state['annotated_filename'] = filename = upload.file.name
        state['annotated_mimetype'] = mimetypes.guess_type(upload.file.name)[0]
        state['annotated_timestamp'] = _now().strftime(
            DateTime.DATETIME_FORMAT)
        path = self._file_storage_path(sha1, filename)
        if not default_storage.exists(path):
            default_storage.save(path, File(upload.file))
        module.state = json.dumps(state)
        module.save()
        return Response(json_body=self.staff_grading_data())

    @XBlock.handler
    def download_assignment(self, request, suffix=''):
        answer = self.get_submission()['answer']
        path = self._file_storage_path(answer['sha1'], answer['filename'])
        return self.download(path, answer['mimetype'], answer['filename'])

    @XBlock.handler
    def download_annotated(self, request, suffix=''):
        path = self._file_storage_path(
            self.annotated_sha1,
            self.annotated_filename,
        )
        return self.download(path, self.annotated_mimetype,
                             self.annotated_filename)

    @XBlock.handler
    def staff_download(self, request, suffix=''):
        require(self.is_course_staff())
        submission = self.get_submission(request.params['student_id'])
        answer = submission['answer']
        path = self._file_storage_path(answer['sha1'], answer['filename'])
        return self.download(path, answer['mimetype'], answer['filename'])

    @XBlock.handler
    def staff_download_annotated(self, request, suffix=''):
        require(self.is_course_staff())
        module = StudentModule.objects.get(pk=request.params['module_id'])
        state = json.loads(module.state)
        path = self._file_storage_path(state['annotated_sha1'],
                                       state['annotated_filename'])
        return self.download(path, state['annotated_mimetype'],
                             state['annotated_filename'])

    def download(self, path, mimetype, filename):
        BLOCK_SIZE = (1 << 10) * 8  # 8 KB
        file = default_storage.open(path)
        app_iter = iter(partial(file.read, BLOCK_SIZE), '')
        return Response(app_iter=app_iter,
                        content_type=mimetype,
                        content_disposition="attachment; filename=" + filename)

    @XBlock.handler
    def get_staff_grading_data(self, request, suffix=''):
        require(self.is_course_staff())
        return Response(json_body=self.staff_grading_data())

    @XBlock.handler
    def enter_grade(self, request, suffix=''):
        require(self.is_course_staff())
        module = StudentModule.objects.get(pk=request.params['module_id'])
        state = json.loads(module.state)
        score = int(request.params['grade'])
        if self.is_instructor():
            uuid = request.params['submission_id']
            submissions_api.set_score(uuid, score, self.max_score())
        else:
            state['staff_score'] = score
        state['comment'] = request.params.get('comment', '')
        module.state = json.dumps(state)
        module.save()
        return Response(json_body=self.staff_grading_data())

    @XBlock.handler
    def remove_grade(self, request, suffix=''):
        require(self.is_course_staff())
        student_id = request.params['student_id']
        submissions_api.reset_score(student_id, self.course_id, self.block_id)
        module = StudentModule.objects.get(pk=request.params['module_id'])
        state = json.loads(module.state)
        state['staff_score'] = None
        state['comment'] = ''
        state['annotated_sha1'] = None
        state['annotated_filename'] = None
        state['annotated_mimetype'] = None
        state['annotated_timestamp'] = None
        module.state = json.dumps(state)
        module.save()
        return Response(json_body=self.staff_grading_data())

    def is_course_staff(self):
        return getattr(self.xmodule_runtime, 'user_is_staff', False)

    def is_instructor(self):
        return self.xmodule_runtime.get_user_role() == 'instructor'

    def show_staff_grading_interface(self):
        in_studio_preview = self.scope_ids.user_id is None
        return self.is_course_staff() and not in_studio_preview

    def past_due(self):
        due = get_extended_due_date(self)
        if due is not None:
            return _now() > due
        return False

    def upload_allowed(self):
        return not self.past_due() and self.score is None

    def _file_storage_path(self, sha1, filename):
        path = ('{loc.org}/{loc.course}/{loc.block_type}/{loc.block_id}'
                '/{sha1}{ext}'.format(loc=self.location,
                                      sha1=sha1,
                                      ext=os.path.splitext(filename)[1]))
        return path
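# --- Illustration (not part of the original example) ---
# A minimal sketch of the storage path produced by _file_storage_path above.
# The location attributes and sha1 below are hypothetical placeholder values.
import os

class _FakeLocation(object):
    org = 'DemoOrg'
    course = 'Demo101'
    block_type = 'sga'
    block_id = 'a1b2c3'

def _example_storage_path(sha1, filename):
    loc = _FakeLocation()
    return ('{loc.org}/{loc.course}/{loc.block_type}/{loc.block_id}'
            '/{sha1}{ext}'.format(loc=loc,
                                  sha1=sha1,
                                  ext=os.path.splitext(filename)[1]))

# _example_storage_path('deadbeef', 'report.pdf')
# -> 'DemoOrg/Demo101/sga/a1b2c3/deadbeef.pdf'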
Beispiel #10
0
class ProctoringFields(object):
    """
    Fields that are specific to Proctored or Timed Exams
    """
    is_time_limited = Boolean(
        display_name=_("Is Time Limited"),
        help=_("This setting indicates whether students have a limited time"
               " to view or interact with this courseware component."),
        default=False,
        scope=Scope.settings,
    )

    default_time_limit_minutes = Integer(
        display_name=_("Time Limit in Minutes"),
        help=_("The number of minutes available to students for viewing or interacting with this courseware component."),
        default=None,
        scope=Scope.settings,
    )

    is_proctored_enabled = Boolean(
        display_name=_("Is Proctoring Enabled"),
        help=_(
            "This setting indicates whether this exam is a proctored exam."),
        default=False,
        scope=Scope.settings,
    )

    exam_review_rules = String(
        display_name=_("Software Secure Review Rules"),
        help=_("This setting indicates what rules the proctoring team should follow when viewing the videos."),
        default='',
        scope=Scope.settings,
    )

    is_practice_exam = Boolean(
        display_name=_("Is Practice Exam"),
        help=_("This setting indicates whether this exam is for testing purposes only. Practice exams are not verified."),
        default=False,
        scope=Scope.settings,
    )

    @property
    def is_timed_exam(self):
        """
        Alias the permutation of above fields that corresponds to un-proctored timed exams
        to the more clearly-named is_timed_exam
        """
        return not self.is_proctored_enabled and not self.is_practice_exam and self.is_time_limited

    @property
    def is_proctored_exam(self):
        """ Alias the is_proctored_enabled field to the more legible is_proctored_exam """
        return self.is_proctored_enabled

    @is_proctored_exam.setter
    def is_proctored_exam(self, value):
        """ Alias the is_proctored_enabled field to the more legible is_proctored_exam """
        self.is_proctored_enabled = value
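# --- Illustration (not part of the original example) ---
# A minimal sketch of how the ProctoringFields flags combine; _Exam is a
# hypothetical plain object standing in for a block carrying these attributes.
class _Exam(object):
    def __init__(self, time_limited, proctored, practice):
        self.is_time_limited = time_limited
        self.is_proctored_enabled = proctored
        self.is_practice_exam = practice

    @property
    def is_timed_exam(self):
        # Same rule as ProctoringFields.is_timed_exam above.
        return (not self.is_proctored_enabled and
                not self.is_practice_exam and
                self.is_time_limited)

assert _Exam(True, False, False).is_timed_exam       # plain timed exam
assert not _Exam(True, True, False).is_timed_exam    # proctored exams are not "timed exams"
assert not _Exam(True, False, True).is_timed_exam    # practice exams are excluded as well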
import pkg_resources

from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.fields import Integer, Scope


class {{cookiecutter.class_name}}(XBlock):
    """
    TO-DO: document what your XBlock does.
    """

    # Fields are defined on the class.  You can access them in your code as
    # self.<fieldname>.

    # TO-DO: delete count, and define your own fields.
    count = Integer(
        default=0, scope=Scope.user_state,
        help="A simple counter, to show something happening",
    )

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    # TO-DO: change this view to display your data your own way.
    def student_view(self, context=None):
        """
        The primary view of the {{cookiecutter.class_name}}, shown to students
        when viewing courses.
        """
        html = self.resource_string("static/html/{{cookiecutter.short_name|lower}}.html")
        frag = Fragment(html.format(self=self))
        frag.add_css(self.resource_string("static/css/{{cookiecutter.short_name|lower}}.css"))
        frag.add_javascript(self.resource_string("static/js/src/{{cookiecutter.short_name|lower}}.js"))
        frag.initialize_js('{{cookiecutter.class_name}}')
        return frag
class MentoringWithExplicitStepsBlock(BaseMentoringBlock,
                                      StudioContainerWithNestedXBlocksMixin,
                                      I18NService):
    """
    An XBlock providing mentoring capabilities with explicit steps
    """
    USER_STATE_FIELDS = ['num_attempts']

    # Content
    extended_feedback = Boolean(
        display_name=_("Extended feedback"),
        help=_("Show extended feedback when all attempts are used up?"),
        default=False,
        scope=Scope.content)

    # Settings
    display_name = String(display_name=_("Title (display name)"),
                          help=_("Title to display"),
                          default=_("Step Builder"),
                          scope=Scope.settings)

    # User state
    active_step = Integer(
        # Keep track of the student progress.
        default=0,
        scope=Scope.user_state,
        enforce_type=True)

    editable_fields = ('display_name', 'max_attempts', 'extended_feedback',
                       'weight')

    def build_user_state_data(self, context=None):
        user_state_data = super(MentoringWithExplicitStepsBlock,
                                self).build_user_state_data()
        user_state_data['active_step'] = self.active_step_safe
        user_state_data['score_summary'] = self.get_score_summary()
        return user_state_data

    @lazy
    def question_ids(self):
        """
        Get the usage_ids of all of this XBlock's children that are "Questions".
        """
        return list(
            chain.from_iterable(
                self.runtime.get_block(step_id).step_ids
                for step_id in self.step_ids))

    @lazy
    def questions(self):
        """
        Get all questions associated with this block.
        """
        return [
            self.runtime.get_block(question_id)
            for question_id in self.question_ids
        ]

    @property
    def active_step_safe(self):
        """
        Get self.active_step and double-check that it is a valid value.
        The stored value could be invalid if this block has been edited and new steps were
        added/deleted.
        """
        active_step = self.active_step
        if active_step >= 0 and active_step < len(self.step_ids):
            return active_step
        if active_step == -1 and self.has_review_step:
            return active_step  # -1 indicates the review step
        return 0

    def get_active_step(self):
        """ Get the active step as an instantiated XBlock """
        block = self.runtime.get_block(self.step_ids[self.active_step_safe])
        if block is None:
            log.error("Unable to load step builder step child %s",
                      self.step_ids[self.active_step_safe])
        return block

    @lazy
    def step_ids(self):
        """
        Get the usage_ids of all of this XBlock's children that are steps.
        """
        from .step import MentoringStepBlock  # Import here to avoid circular dependency
        return [
            _normalize_id(child_id) for child_id in self.children
            if child_isinstance(self, child_id, MentoringStepBlock)
        ]

    @lazy
    def steps(self):
        """
        Get the step children of this block.
        """
        return [self.runtime.get_block(step_id) for step_id in self.step_ids]

    def get_question_number(self, question_name):
        question_names = [q.name for q in self.questions]
        return question_names.index(question_name) + 1

    def answer_mapper(self, answer_status):
        steps = self.steps
        answer_map = []
        for step in steps:
            for answer in step.student_results:
                if answer[1]['status'] == answer_status:
                    answer_map.append({
                        'id': answer[0],
                        'details': answer[1],
                        'step': step.step_number,
                        'number': self.get_question_number(answer[0]),
                    })
        return answer_map

    @property
    def has_review_step(self):
        return any(
            child_isinstance(self, child_id, ReviewStepBlock)
            for child_id in self.children)

    @property
    def review_step(self):
        """ Get the Review Step XBlock child, if any. Otherwise returns None """
        for step_id in self.children:
            if child_isinstance(self, step_id, ReviewStepBlock):
                return self.runtime.get_block(step_id)

    @property
    def score(self):
        questions = self.questions
        total_child_weight = sum(
            float(question.weight) for question in questions)
        if total_child_weight == 0:
            return Score(0, 0, [], [], [])
        steps = self.steps
        questions_map = {question.name: question for question in questions}
        points_earned = 0
        for step in steps:
            for question_name, question_results in step.student_results:
                question = questions_map.get(question_name)
                if question:  # question may be None if the block's questions changed after the student answered
                    points_earned += question_results['score'] * question.weight
        score = points_earned / total_child_weight
        correct = self.answer_mapper(CORRECT)
        incorrect = self.answer_mapper(INCORRECT)
        partially_correct = self.answer_mapper(PARTIAL)

        return Score(score, int(round(score * 100)), correct, incorrect,
                     partially_correct)

    @property
    def complete(self):
        return not self.score.incorrect and not self.score.partially_correct

    @property
    def review_tips(self):
        """ Get review tips, shown for wrong answers. """
        if self.max_attempts > 0 and self.num_attempts >= self.max_attempts:
            # Review tips are only shown if the student is allowed to try again.
            return []
        review_tips = []
        status_cache = dict()
        steps = self.steps
        for step in steps:
            status_cache.update(dict(step.student_results))
        for question in self.questions:
            result = status_cache.get(question.name)
            if result and result.get('status') != 'correct':
                # The student got this wrong. Check if there is a review tip to show.
                tip_html = question.get_review_tip()
                if tip_html:
                    if getattr(self.runtime, 'replace_jump_to_id_urls',
                               None) is not None:
                        tip_html = self.runtime.replace_jump_to_id_urls(
                            tip_html)
                    review_tips.append(tip_html)
        return review_tips

    def show_extended_feedback(self):
        return self.extended_feedback and self.max_attempts_reached

    @XBlock.supports("multi_device")  # Mark as mobile-friendly
    def student_view(self, context):
        fragment = Fragment()
        children_contents = []

        context = context or {}
        # For Step Builder, we don't show the users' old answers when they try again
        context['hide_prev_answer'] = True
        context['score_summary'] = self.get_score_summary()
        for child_id in self.children:
            child = self.runtime.get_block(child_id)
            if child is None:  # child should not be None but it can happen due to bugs or permission issues
                child_content = u"<p>[{}]</p>".format(
                    self._(u"Error: Unable to load child component."))
            else:
                child_fragment = self._render_child_fragment(
                    child, context, view='mentoring_view')
                fragment.add_frag_resources(child_fragment)
                child_content = child_fragment.content
            children_contents.append(child_content)

        fragment.add_content(
            loader.render_django_template(
                'templates/html/mentoring_with_steps.html', {
                    'self': self,
                    'title': self.display_name,
                    'show_title': self.show_title,
                    'children_contents': children_contents,
                },
                i18n_service=self.i18n_service))
        fragment.add_css_url(
            self.runtime.local_resource_url(self,
                                            'public/css/problem-builder.css'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(
                self, 'public/js/vendor/underscore-min.js'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/step_util.js'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(
                self, 'public/js/mentoring_with_steps.js'))

        fragment.add_resource(
            loader.load_unicode(
                'templates/html/mentoring_attempts.underscore'), "text/html")
        fragment.initialize_js(
            'MentoringWithStepsBlock', {
                'show_extended_feedback': self.show_extended_feedback(),
            })

        return fragment

    @property
    def allowed_nested_blocks(self):
        """
        Returns a list of allowed nested XBlocks. Each item can be either
        * An XBlock class
        * A NestedXBlockSpec

        If XBlock class is used it is assumed that this XBlock is enabled and allows multiple instances.
        NestedXBlockSpec allows explicitly setting disabled/enabled state, disabled reason (if any) and single/multiple
        instances
        """
        # Import here to avoid circular dependency
        from .step import MentoringStepBlock
        return [
            MentoringStepBlock,
            NestedXBlockSpec(ReviewStepBlock, single_instance=True),
        ]

    @XBlock.json_handler
    def submit(self, data, suffix=None):
        """
        Called when the user has submitted the answer[s] for the current step.
        """
        # First verify that active_step is correct:
        if data.get("active_step") != self.active_step_safe:
            raise JsonHandlerError(
                400, "Invalid Step. Refresh the page and try again.")

        # The step child will process the data:
        step_block = self.get_active_step()
        if not step_block:
            raise JsonHandlerError(500,
                                   "Unable to load the current step block.")
        response_data = step_block.submit(data)

        # Update the active step:
        new_value = self.active_step_safe + 1
        if new_value < len(self.step_ids):
            self.active_step = new_value
        elif new_value == len(self.step_ids):
            # The user just completed the final step.
            # Update the number of attempts:
            self.num_attempts += 1
            # Do we need to render a review (summary of the user's score):
            if self.has_review_step:
                self.active_step = -1
                response_data['review_html'] = self.runtime.render(
                    self.review_step, "mentoring_view", {
                        'score_summary': self.get_score_summary(),
                    }).content
            response_data['num_attempts'] = self.num_attempts
            # And publish the score:
            score = self.score
            grade_data = {
                'value': score.raw,
                'max_value': self.max_score(),
            }
            self.runtime.publish(self, 'grade', grade_data)

        response_data['active_step'] = self.active_step
        return response_data

    def get_score_summary(self):
        if self.num_attempts == 0:
            return {}
        score = self.score
        return {
            'score': score.percentage,
            'correct_answers': len(score.correct),
            'incorrect_answers': len(score.incorrect),
            'partially_correct_answers': len(score.partially_correct),
            'correct': score.correct,
            'incorrect': score.incorrect,
            'partial': score.partially_correct,
            'complete': self.complete,
            'max_attempts_reached': self.max_attempts_reached,
            'show_extended_review': self.show_extended_feedback(),
            'review_tips': self.review_tips,
        }

    @XBlock.json_handler
    def get_num_attempts(self, data, suffix):
        return {'num_attempts': self.num_attempts}

    @XBlock.json_handler
    def try_again(self, data, suffix=''):
        self.active_step = 0

        step_blocks = [
            self.runtime.get_block(child_id) for child_id in self.step_ids
        ]

        for step in step_blocks:
            step.reset()

        return {'active_step': self.active_step}

    def author_preview_view(self, context):
        return self.student_view(context)

    def author_edit_view(self, context):
        """
        Add some HTML to the author view that allows authors to add child blocks.
        """
        fragment = super(MentoringWithExplicitStepsBlock,
                         self).author_edit_view(context)
        fragment.add_content(
            loader.render_template('templates/html/mentoring_url_name.html',
                                   {"url_name": self.url_name}))
        fragment.add_css_url(
            self.runtime.local_resource_url(self,
                                            'public/css/problem-builder.css'))
        fragment.add_css_url(
            self.runtime.local_resource_url(
                self, 'public/css/problem-builder-edit.css'))
        fragment.add_css_url(
            self.runtime.local_resource_url(
                self, 'public/css/problem-builder-tinymce-content.css'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/util.js'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self,
                                            'public/js/container_edit.js'))
        fragment.initialize_js('ProblemBuilderContainerEdit')
        return fragment

    def student_view_data(self, context=None):
        components = []

        for child_id in self.children:
            child = self.runtime.get_block(child_id)
            if hasattr(child, 'student_view_data'):
                components.append(child.student_view_data(context))

        return {
            'title': self.display_name,
            'block_id': unicode(self.scope_ids.usage_id),
            'display_name': self.display_name,
            'show_title': self.show_title,
            'weight': self.weight,
            'extended_feedback': self.extended_feedback,
            'max_attempts': self.max_attempts,
            'components': components,
        }
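# --- Illustration (not part of the original example) ---
# A minimal worked sketch of the weighted-score rule in the `score` property
# above; the weights and per-question scores below are hypothetical.
weights_and_scores = [
    (1.0, 1.0),  # question 1: weight 1, fully correct
    (2.0, 0.5),  # question 2: weight 2, partially correct
    (1.0, 0.0),  # question 3: weight 1, incorrect
]
total_child_weight = sum(weight for weight, _ in weights_and_scores)
points_earned = sum(weight * result for weight, result in weights_and_scores)
raw_score = points_earned / total_child_weight   # (1.0 + 1.0 + 0.0) / 4.0 = 0.5
percentage = int(round(raw_score * 100))         # 50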
Beispiel #13
0
class InheritanceMixin(XBlockMixin):
    """Field definitions for inheritable fields."""

    graded = Boolean(
        help="Whether this module contributes to the final course grade",
        scope=Scope.settings,
        default=False,
    )
    start = Date(help="Start time when this module is visible",
                 default=datetime(2030, 1, 1, tzinfo=UTC),
                 scope=Scope.settings)
    due = Date(
        display_name=_("Due Date"),
        help=_("Enter the default date by which problems are due."),
        scope=Scope.settings,
    )
    extended_due = Date(
        help="Date that this problem is due by for a particular student. This "
        "can be set by an instructor, and will override the global due "
        "date if it is set to a date that is later than the global due "
        "date.",
        default=None,
        scope=Scope.user_state,
    )
    visible_to_staff_only = Boolean(
        help=_("If true, can be seen only by course staff, regardless of start date."),
        default=False,
        scope=Scope.settings,
    )
    course_edit_method = String(
        display_name=_("Course Editor"),
        help=_("Enter the method by which this course is edited (\"XML\" or \"Studio\")."),
        default="Studio",
        scope=Scope.settings,
        deprecated=True  # Deprecated because user would not change away from Studio within Studio.
    )
    giturl = String(
        display_name=_("GIT URL"),
        help=_("Enter the URL for the course data GIT repository."),
        scope=Scope.settings,
        deprecated=True  # Deprecated because GIT workflow users do not use Studio.
    )
    xqa_key = String(display_name=_("XQA Key"),
                     help=_("This setting is not currently supported."),
                     scope=Scope.settings,
                     deprecated=True)
    annotation_storage_url = String(
        display_name=_("URL for Annotation Storage"),
        help=_("Enter the location of the annotation storage server. The textannotation, videoannotation, and imageannotation advanced modules require this setting."),
        default="http://your_annotation_storage.com",
        scope=Scope.settings,
    )
    annotation_token_secret = String(
        display_name=_("Secret Token String for Annotation"),
        help=_("Enter the secret string for annotation storage. The textannotation, videoannotation, and imageannotation advanced modules require this string."),
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        scope=Scope.settings,
    )
    graceperiod = Timedelta(
        help="Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings,
    )
    showanswer = String(
        display_name=_("Show Answer"),
        help=_("Specify when the Show Answer button appears for each problem. Valid values are \"always\", \"answered\", \"attempted\", \"closed\", \"finished\", \"past_due\", and \"never\"."),
        scope=Scope.settings,
        default="finished",
    )
    rerandomize = String(
        display_name=_("Randomization"),
        help=_("Specify how often variable values in a problem are randomized when a student loads the problem. Valid values are \"always\", \"onreset\", \"never\", and \"per_student\". This setting only applies to problems that have randomly generated numeric values."),
        scope=Scope.settings,
        default="never",
    )
    days_early_for_beta = Float(
        display_name=_("Days Early for Beta Users"),
        help=_("Enter the number of days before the start date that beta users can access the course."),
        scope=Scope.settings,
        default=None,
    )
    static_asset_path = String(
        display_name=_("Static Asset Path"),
        help=_("Enter the path to use for files on the Files & Uploads page. This value overrides the Studio default, c4x://."),
        scope=Scope.settings,
        default='',
    )
    text_customization = Dict(
        display_name=_("Text Customization"),
        help=_(
            "Enter string customization substitutions for particular locations."
        ),
        scope=Scope.settings,
    )
    use_latex_compiler = Boolean(
        display_name=_("Enable LaTeX Compiler"),
        help=_("Enter true or false. If true, you can use the LaTeX templates for HTML components and advanced Problem components."),
        default=False,
        scope=Scope.settings)
    max_attempts = Integer(
        display_name=_("Maximum Attempts"),
        help=_("Enter the maximum number of times a student can try to answer problems. This is a course-wide setting, but you can specify a different number when you create an individual problem. To allow unlimited attempts, enter null."),
        values={"min": 0},
        scope=Scope.settings)
    matlab_api_key = String(
        display_name=_("Matlab API key"),
        help=_("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
               "This key is granted for exclusive use in this course for the specified duration. "
               "Do not share the API key with other courses. Notify MathWorks immediately "
               "if you believe the key is exposed or compromised. To obtain a key for your course, "
               "or to report an issue, please contact [email protected]"),
        scope=Scope.settings)
    # This should be scoped to content, but since it's defined in the policy
    # file, it is currently scoped to settings.
    user_partitions = UserPartitionList(
        display_name=_("Experiment Group Configurations"),
        help=_("Enter the configurations that govern how students are grouped for content experiments."),
        default=[],
        scope=Scope.settings)
    video_speed_optimizations = Boolean(
        display_name=_("Enable video caching system"),
        help=_("Enter true or false. If true, video caching will be used for HTML5 videos."),
        default=True,
        scope=Scope.settings)
Beispiel #14
0
class RandomizeFields(object):
    choice = Integer(help="Which random child was chosen", scope=Scope.user_state)
class Workflow2XBlock(XBlock):
    """
    这是学生回答习题的,需要保存每个学生的回答状态
    """

    logger = Util.logger(Config.loggerConfig)
    gitlabRepo = GitRepo(dict(Config.teacherGitlab, **{'logger': logger}))

    # This is the special XBlock field that specifies the block's display name
    display_name = String(display_name='Display Name', default=u'练习题工作流', scope=Scope.settings, help='Name of the component in the edxplatform')

    # Maximum number of attempts a student may make on this question; 0 means unlimited
    # Note: these fields were originally defined with Scope.user_state_summary, which lets
    #       Open edX modify them from the LMS. For security reasons I would not recommend
    #       that, but teachers do need this capability.
    maxTry = Integer(default=0, scope=Scope.content)
    #maxTry = Integer(default=0, scope=Scope.user_state_summary)
    # The question JSON stored by this block
    questionJson = Dict(default={}, scope=Scope.content)
    #questionJson = Dict(default={}, scope=Scope.user_state_summary)
    # The question number stored by this block
    qNo = Integer(default=0, scope=Scope.content)
    #qNo = Integer(default=0, scope=Scope.user_state_summary)
    # Number of attempts the student has made so far
    tried = Integer(default=0, scope=Scope.user_state)
    # Record of each answer the student has submitted
    answerList = List(default=None, scope=Scope.user_state)

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def student_view(self, context=None):
        '''
        Student-facing view.
        '''
        if self.inStudio():
            return self.author_view(context)
        html = self.resource_string("static/html/workflow2.html")
        frag = Fragment(html)
        frag.add_css(self.resource_string("static/css/workflow2.css"))
        frag.add_javascript_url('//cdn.bootcss.com/handlebars.js/4.0.5/handlebars.min.js')
        frag.add_javascript_url('//cdn.bootcss.com/showdown/1.3.0/showdown.min.js')
        frag.add_javascript(self.resource_string("static/js/src/workflow2.js"))
        frag.initialize_js('Workflow2XBlock')
        return frag

    def author_view(self, context=None):
        '''
        Thumbnail/preview view shown in Studio.
        '''
        content = {
            'question': self.qNo,
            'maxTry': self.maxTry
        }
        frag = Fragment(unicode(json.dumps(content)))
        return frag

    def studio_view(self, context=None):
        '''
        Configuration view shown in Studio.
        '''
        html = self.resource_string("static/html/workflow2_config.html")
        frag = Fragment(unicode(html).format(qNo=self.qNo, maxTry=self.maxTry))
        frag.add_javascript(self.resource_string('static/js/src/workflow2_config.js'))
        frag.initialize_js('Workflow2XBlock')
        return frag

    def inStudio(self):
        '''
        Check whether the block is currently running in Studio.
        '''
        if hasattr(self.runtime, 'get_real_user'):
            return self.runtime.get_real_user is None
        else:
            # Running in the test environment
            return False

    def genCurrentStatus(self, needGradeInfo):
        if not hasattr(self.runtime, "anonymous_student_id"):
            # Test environment
            student = Test()
            student.email = '*****@*****.**'
            student.username = '******'
            student.is_staff = True
            graded, gradeInfo = (False, None)
        else:
            student = self.runtime.get_real_user(self.runtime.anonymous_student_id)
            if needGradeInfo:
                graded, gradeInfo = self.fetchGradeInfo(student, self.qNo)
                self.tried, self.answerList = self.fetchAnswerInfo(student, self.qNo)
            if self.answerList is None:
                self.tried, self.answerList = self.fetchAnswerInfo(student, self.qNo)

        studentEmail = student.email
        studentUsername = student.username
        studentIsStaff = student.is_staff
        tried = self.tried
        maxTry = self.maxTry

        content = {
            'maxTry': maxTry,
            'tried': tried,
            'student': {'email': studentEmail, 'username': studentUsername, 'is_staff': studentIsStaff},
            'answer': self.answerList,
            'question': self.questionJson
        }
        if needGradeInfo:
            return dict(content, **{'graded': graded, 'gradeInfo': gradeInfo})
        else:
            return content

    def fetchGradeInfo(self, student, qNo):
        '''
        Fetch the grading information for this student's question.
        '''
        filepath = '%(emailHash)s/%(username)s/%(qNo)d/%(qNo)d.graded.json' % {
            'emailHash': hashlib.new('md5', student.email).hexdigest()[-2:],
            'username': student.username,
            'qNo': qNo
        }
        gradeInfo = self.gitlabRepo.readContent(filepath)
        if gradeInfo is None:
            graded = False
        else:
            graded = True
        return (graded, gradeInfo)

    def fetchAnswerInfo(self, student, qNo):
        '''
        Fetch the student's answer record from GitLab and cache it locally.
        '''
        filepath = '%(emailHash)s/%(username)s/%(qNo)d/%(qNo)d.json' % {
            'emailHash': hashlib.new('md5', student.email).hexdigest()[-2:],
            'username': student.username,
            'qNo': qNo
        }
        answerInfo = self.gitlabRepo.readContent(filepath)
        if answerInfo is None:
            return (0, [])
        else:
            self.logger.info('fetch answer info from gitlab')
            return (answerInfo['tried'], answerInfo['answer'])

    @XBlock.json_handler
    def getCurrentStatus(self, data, suffix=''):
        try:
            status = self.genCurrentStatus(True)
            return {'code': 0, 'desc': 'ok', 'result': status}
        except Exception as e:
            self.logger.exception('ERROR getCurrentStatus %s' % (str(e)))
            return {'code': 1, 'desc': str(e)}

    @XBlock.json_handler
    def studentSubmit(self, data, suffix=''):
        try:
            student = self.runtime.get_real_user(self.runtime.anonymous_student_id)

            t = datetime.datetime.now() + datetime.timedelta(hours=12)
            createtime = t.strftime('%Y-%m-%d:%H:%M:%S')
            answerItem = {'time': createtime, 'answer': data['answer']}
            self.logger.debug('answerItem %s' % str(answerItem))
            self.answerList.append(answerItem)
            self.logger.debug('answerList %s' % str(self.answerList))
            self.tried += 1
            # Drop surplus history entries
            if len(self.answerList) > Config.maxSizeOfAnswerList:
                self.answerList = self.answerList[-(Config.maxSizeOfAnswerList):]

            content = self.genCurrentStatus(False)
            # push to gitlab
            filepath = '%(emailHash)s/%(username)s/%(qNo)d/%(qNo)d.json' % {
                'emailHash': hashlib.new('md5', student.email).hexdigest()[-2:],
                'username': student.username,
                'qNo': self.qNo
            }
            oldContent = self.gitlabRepo.readContent(filepath)
            if oldContent is None:
                self.gitlabRepo.createContent(json.dumps(content, ensure_ascii=False, indent=4), filepath, 'create %s' % filepath)
            else:
                self.gitlabRepo.updateContent(json.dumps(content, ensure_ascii=False, indent=4), filepath, 'update %s' % filepath)

            self.logger.info('studentSubmit [student=%s] [tried=%d] [maxTry=%d] [answer=%s] [qNo=%d]' % (
                (student.email, student.username),
                self.tried,
                self.maxTry,
                json.dumps(answerItem),
                self.qNo
            ))
            return {'code': 0, 'desc': 'ok', 'result': self.genCurrentStatus(False)}
        except Exception as e:
            self.logger.exception('ERROR student_submit %s' % str(e))
            return {'code': 1, 'desc': str(e.args)}

    @XBlock.json_handler
    def studioSubmit(self, data, suffix=''):
        '''
        Configure the XBlock's question and the per-student attempt limit.
        data.qNo      question number
        data.maxTry   maximum number of attempts (0 means unlimited)
        '''
        try:
            self.logger.info('studioSubmit data=%s' % str(data))
            # Save maxTry
            self.maxTry = int(data['maxTry'])

            # Fetch the question JSON for this question number from GitHub
            q_number = int(data['qNo'])
            self.qNo = q_number
            url = Config.getQuestionJsonUrl % {
                'qDir': ((q_number - 1) / 100) + 1,
                'qNo': q_number,
            }
            self.logger.info('studioSubmit url=%s' % url)
            res_data = urllib2.urlopen(url)
            res = res_data.read()
            res = json.loads(res)
            if 'content' in res:
                content = base64.b64decode(res['content'])
                self.questionJson = json.loads(content)
                self.logger.info('get question from remote [qNo=%s]' % (q_number))
                return {'code': 0, 'desc': 'ok'}
            else:
                self.logger.warning('ERROR studioSubmit: Cannot read question json [qNo=%d] [msg=%s]' % (q_number, res['message']))
                return {'code': 2, 'desc': res['message']}
        except Exception as e:
            self.logger.exception('ERROR')
            return {'code': 1, 'desc': str(e.args)}

    # TO-DO: change this to create the scenarios you'd like to see in the
    # workbench while developing your XBlock.
    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("Workflow2XBlock",
             """<workflow2/>
             """),
            ("Workflow2XBlock-test",
                """
                <workflow2 maxTry="5" questionJson='{"status":"error","knowledge":["文件系统"],"degree_of_difficulty":1,"explain":"解释\n","question":"文件的逻辑结构的基本形式有**(A)**,__(B)__和__(C)__。\\n```\\n$ pip install\\n```","source":"网络","answer":"解释\n","type":"fill_in_the_blank","options":["A.", "B.", "C."],"q_number":396}'/>
             """),
            ("Multiple Workflow2XBlock",
             """<vertical_demo>
                <workflow2 maxTry="2" questionJson='{"status":"ok","knowledge":["操作系统概述"],"degree_of_difficulty":1,"explain":"B\n","question":"批处理系统的主要缺点是 。\n","source":"网络","answer":"B","type":"single_answer","options":["A.CPU的利用率不高","B.失去了交互性","C.不具备并行性","D.以上都不是"],"q_number":1002}'/>
                <workflow2 maxTry="0" questionJson='{"status":"ok","knowledge":["调查问卷"],"degree_of_difficulty":1,"explain":"解释\n","question":"为什么要学这门课?\n","source":"网络","answer":"A","type":"multi_answer","options":["A.对内容有兴趣","B.内容与自己的目标相一致,结果有用","C.由于学分要求,必须选","D.其他,请注明原因"],"q_number":1137}' answerList='[{"time":"2012-01-01 13:20","answer":"A"}]'/>
                </vertical_demo>
             """),
        ]
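# --- Illustration (not part of the original example) ---
# A minimal sketch of the GitLab path convention used by fetchAnswerInfo and
# fetchGradeInfo above (Python 2, like the example); the email, username and
# question number below are hypothetical.
import hashlib

def _answer_path(email, username, q_no, graded=False):
    suffix = '.graded.json' if graded else '.json'
    return '%(emailHash)s/%(username)s/%(qNo)d/%(qNo)d%(suffix)s' % {
        'emailHash': hashlib.new('md5', email).hexdigest()[-2:],
        'username': username,
        'qNo': q_no,
        'suffix': suffix,
    }

# _answer_path('student@example.com', 'alice', 396)
# -> '<two hex chars of md5(email)>/alice/396/396.json'
# _answer_path('student@example.com', 'alice', 396, graded=True)
# -> '<two hex chars of md5(email)>/alice/396/396.graded.json'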
class FreeTextResponseModelMixin(object):
    """
    Handle data access for Free-text Response XBlock instances
    """

    editable_fields = [
        'display_name',
        'description',
        'description_required',
        'description_upperlimit',
        'comments',
        'comments_upperlimit',
        'prompt',
        'weight',
        'max_attempts',
        'display_correctness',
        'min_word_count',
        'max_word_count',
        'fullcredit_keyphrases',
        'halfcredit_keyphrases',
        'submitted_message',
        'display_other_student_responses',
        'saved_message',
    ]

    display_correctness = Boolean(
        display_name=_('Display Correctness?'),
        help=_(
            'This is a flag that indicates if the indicator '
            'icon should be displayed after a student enters '
            'their response'
        ),
        default=True,
        scope=Scope.settings,
    )
    display_other_student_responses = Boolean(
        display_name=_('Display Other Student Responses'),
        help=_(
            'This will display other student responses to the '
            'student after they submit their response.'
        ),
        default=False,
        scope=Scope.settings,
    )
    displayable_answers = List(
        default=[],
        scope=Scope.user_state_summary,
        help=_('System selected answers to give to students'),
    )
    display_name = String(
        display_name=_('Display Name'),
        help=_(
            'This is the title for this question type'
        ),
        default='Free-text Response',
        scope=Scope.settings,
    )
    description = String(
        display_name=_('Description'),
        help=_(
            'This is the description text shown for this question'
        ),
        default='Description',
        scope=Scope.settings,
    )
    description_required = String(
        display_name=_('Description Required Message'),
        help=_(
            'This is the validation message for description when required'
        ),
        default='The description is required',
        scope=Scope.settings,
    )
    description_upperlimit = String(
        display_name=_('Description Upper Limit Message'),
        help=_(
            'This is the validation message for description when over the word count limit'
        ),
        default='The description has too many words (max 10,000 words)',
        scope=Scope.settings,
    )
    comments = String(
        display_name=_('Comments'),
        help=_(
            'This is the label for the comments field of this question'
        ),
        default='Comments',
        scope=Scope.settings,
    )
    comments_upperlimit = String(
        display_name=_('Comments Upper Limit Message'),
        help=_(
            'This is the validation message for comments when over the word count limit'
        ),
        default='The comments have too many words (max 10,000 words)',
        scope=Scope.settings,
    )
    fullcredit_keyphrases = List(
        display_name=_('Full-Credit Key Phrases'),
        help=_(
            'This is a list of words or phrases, one of '
            'which must be present in order for the student\'s answer '
            'to receive full credit'
        ),
        default=[],
        scope=Scope.settings,
    )
    halfcredit_keyphrases = List(
        display_name=_('Half-Credit Key Phrases'),
        help=_(
            'This is a list of words or phrases, one of '
            'which must be present in order for the student\'s answer '
            'to receive half credit'
        ),
        default=[],
        scope=Scope.settings,
    )
    max_attempts = Integer(
        display_name=_('Maximum Number of Attempts'),
        help=_(
            'This is the maximum number of times a '
            'student is allowed to attempt the problem'
        ),
        default=0,
        values={'min': 1},
        scope=Scope.settings,
    )
    max_word_count = Integer(
        display_name=_('Maximum Word Count'),
        help=_(
            'This is the maximum number of words allowed for this '
            'question'
        ),
        default=10000,
        values={'min': 1},
        scope=Scope.settings,
    )
    min_word_count = Integer(
        display_name=_('Minimum Word Count'),
        help=_(
            'This is the minimum number of words required '
            'for this question'
        ),
        default=1,
        values={'min': 1},
        scope=Scope.settings,
    )
    prompt = String(
        display_name=_('Prompt'),
        help=_(
            'This is the prompt students will see when '
            'asked to enter their response'
        ),
        default='Please enter your response within this text area',
        scope=Scope.settings,
        multiline_editor=True,
    )
    submitted_message = String(
        display_name=_('Submission Received Message'),
        help=_(
            'This is the message students will see upon '
            'submitting their response'
        ),
        default='Your submission has been received',
        scope=Scope.settings,
    )
    weight = Integer(
        display_name=_('Weight'),
        help=_(
            'This assigns an integer value representing '
            'the weight of this problem'
        ),
        default=0,
        values={'min': 1},
        scope=Scope.settings,
    )
    saved_message = String(
        display_name=_('Draft Received Message'),
        help=_(
            'This is the message students will see upon '
            'submitting a draft response'
        ),
        default=(
            'Your answers have been saved but not graded. '
            'Click "Submit" to grade them.'
        ),
        scope=Scope.settings,
    )
    count_attempts = Integer(
        default=0,
        scope=Scope.user_state,
    )
    score = Float(
        default=0.0,
        scope=Scope.user_state,
    )
    student_answer = String(
        default='',
        scope=Scope.user_state,
    )
    student_comments = String(
        default='',
        scope=Scope.user_state,
    )
    has_score = True
    show_in_read_only_mode = True

    def store_student_response(self):
        """
        Submit a student answer to the answer pool by appending the given
        answer to the end of the list.
        """
        # if the answer is wrong, do not display it
        if self.score != Credit.full.value:
            return

        student_id = self.get_student_id()
        # remove any previous answers the student submitted
        for index, response in enumerate(self.displayable_answers):
            if response['student_id'] == student_id:
                del self.displayable_answers[index]
                break

        self.displayable_answers.append({
            'student_id': student_id,
            'answer': self.student_answer,
            'comments': self.student_comments,
        })

        # Want to store extra response so student can still see
        # MAX_RESPONSES answers if their answer is in the pool.
        response_index = -(MAX_RESPONSES+1)
        self.displayable_answers = self.displayable_answers[response_index:]

    def max_score(self):
        """
        Returns the configured number of possible points for this component.
        Arguments:
            None
        Returns:
            float: The number of possible points for this component
        """
        return self.weight

    def _compute_score(self):
        """
        Computes and publishes the user's score for the XBlock
        based on their answer
        """
        credit = self._determine_credit()
        self.score = credit.value
        try:
            self.runtime.publish(
                self,
                'grade',
                {
                    'value': self.score,
                    'max_value': Credit.full.value
                }
            )
        except IntegrityError:
            pass
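# --- Illustration (not part of the original example) ---
# A minimal sketch of the answer-pool trimming performed by
# store_student_response above; MAX_RESPONSES is assumed to be a small
# module-level constant in the original code.
MAX_RESPONSES = 3

pool = [{'student_id': i, 'answer': 'a%d' % i, 'comments': ''} for i in range(10)]

# Keep one extra entry so a student whose own answer is in the pool can still
# see MAX_RESPONSES answers from other students.
pool = pool[-(MAX_RESPONSES + 1):]
assert len(pool) == 4 and pool[0]['student_id'] == 6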
class InheritanceMixin(XBlockMixin):
    """Field definitions for inheritable fields."""

    graded = Boolean(
        help="Whether this module contributes to the final course grade",
        scope=Scope.settings,
        default=False,
    )
    start = Date(
        help="Start time when this module is visible",
        default=datetime(2030, 1, 1, tzinfo=UTC),
        scope=Scope.settings
    )
    due = Date(
        display_name=_("Due Date"),
        help=_("Enter the default date by which problems are due."),
        scope=Scope.settings,
    )
    visible_to_staff_only = Boolean(
        help=_("If true, can be seen only by course staff, regardless of start date."),
        default=False,
        scope=Scope.settings,
    )
    course_edit_method = String(
        display_name=_("Course Editor"),
        help=_("Enter the method by which this course is edited (\"XML\" or \"Studio\")."),
        default="Studio",
        scope=Scope.settings,
        deprecated=True  # Deprecated because user would not change away from Studio within Studio.
    )
    giturl = String(
        display_name=_("GIT URL"),
        help=_("Enter the URL for the course data GIT repository."),
        scope=Scope.settings
    )
    xqa_key = String(
        display_name=_("XQA Key"),
        help=_("This setting is not currently supported."), scope=Scope.settings,
        deprecated=True
    )
    annotation_storage_url = String(
        help=_("Enter the location of the annotation storage server. The textannotation, videoannotation, and imageannotation advanced modules require this setting."),
        scope=Scope.settings,
        default="http://your_annotation_storage.com",
        display_name=_("URL for Annotation Storage")
    )
    annotation_token_secret = String(
        help=_("Enter the secret string for annotation storage. The textannotation, videoannotation, and imageannotation advanced modules require this string."),
        scope=Scope.settings,
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        display_name=_("Secret Token String for Annotation")
    )
    graceperiod = Timedelta(
        help="Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings,
    )
    group_access = Dict(
        help=_("Enter the ids for the content groups this problem belongs to."),
        scope=Scope.settings,
    )
    showanswer = String(
        display_name=_("Show Answer"),
        help=_("Specify when the Show Answer button appears for each problem. Valid values are \"always\", \"answered\", \"attempted\", \"closed\", \"finished\", \"past_due\", and \"never\"."),
        scope=Scope.settings,
        default="finished",
    )
    rerandomize = String(
        display_name=_("Randomization"),
        help=_("Specify how often variable values in a problem are randomized when a student loads the problem. Valid values are \"always\", \"onreset\", \"never\", and \"per_student\". This setting only applies to problems that have randomly generated numeric values."),
        scope=Scope.settings,
        default="never",
    )
    days_early_for_beta = Float(
        display_name=_("Days Early for Beta Users"),
        help=_("Enter the number of days before the start date that beta users can access the course."),
        scope=Scope.settings,
        default=None,
    )
    static_asset_path = String(
        display_name=_("Static Asset Path"),
        help=_("Enter the path to use for files on the Files & Uploads page. This value overrides the Studio default, c4x://."),
        scope=Scope.settings,
        default='',
    )
    text_customization = Dict(
        display_name=_("Text Customization"),
        help=_("Enter string customization substitutions for particular locations."),
        scope=Scope.settings,
    )
    use_latex_compiler = Boolean(
        display_name=_("Enable LaTeX Compiler"),
        help=_("Enter true or false. If true, you can use the LaTeX templates for HTML components and advanced Problem components."),
        default=False,
        scope=Scope.settings
    )
    max_attempts = Integer(
        display_name=_("Maximum Attempts"),
        help=_("Enter the maximum number of times a student can try to answer problems. By default, Maximum Attempts is set to null, meaning that students have an unlimited number of attempts for problems. You can override this course-wide setting for individual problems. However, if the course-wide setting is a specific number, you cannot set the Maximum Attempts for individual problems to unlimited."),
        values={"min": 0}, scope=Scope.settings
    )
    matlab_api_key = String(
        display_name=_("Matlab API key"),
        help=_("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
               "This key is granted for exclusive use in this course for the specified duration. "
               "Do not share the API key with other courses. Notify MathWorks immediately "
               "if you believe the key is exposed or compromised. To obtain a key for your course, "
               "or to report an issue, please contact [email protected]"),
        scope=Scope.settings
    )
    # This should be scoped to content, but since it's defined in the policy
    # file, it is currently scoped to settings.
    user_partitions = UserPartitionList(
        display_name=_("Group Configurations"),
        help=_("Enter the configurations that govern how students are grouped together."),
        default=[],
        scope=Scope.settings
    )
    video_speed_optimizations = Boolean(
        display_name=_("Enable video caching system"),
        help=_("Enter true or false. If true, video caching will be used for HTML5 videos."),
        default=True,
        scope=Scope.settings
    )

    reset_key = "DEFAULT_SHOW_RESET_BUTTON"
    default_reset_button = getattr(settings, reset_key) if hasattr(settings, reset_key) else False
    show_reset_button = Boolean(
        display_name=_("Show Reset Button for Problems"),
        help=_("Enter true or false. If true, problems in the course default to always displaying a 'Reset' button. You can "
               "override this in each problem's settings. All existing problems are affected when this course-wide setting is changed."),
        scope=Scope.settings,
        default=default_reset_button
    )
    edxnotes = Boolean(
        display_name=_("Enable Student Notes"),
        help=_("Enter true or false. If true, students can use the Student Notes feature."),
        default=False,
        scope=Scope.settings
    )
    edxnotes_visibility = Boolean(
        display_name="Student Notes Visibility",
        help=_("Indicates whether Student Notes are visible in the course. "
               "Students can also show or hide their notes in the courseware."),
        default=True,
        scope=Scope.user_info
    )

    in_entrance_exam = Boolean(
        display_name=_("Tag this module as part of an Entrance Exam section"),
        help=_("Enter true or false. If true, answer submissions for problem modules will be "
               "considered in the Entrance Exam scoring/gating algorithm."),
        scope=Scope.settings,
        default=False
    )
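
The inheritable `due` and `graceperiod` settings above are typically combined by grading code to decide whether a late submission is still accepted. A minimal, self-contained sketch of that calculation (the helper name and the use of plain datetime/timedelta values are illustrative assumptions, not part of InheritanceMixin):

from datetime import datetime, timedelta, timezone


def is_past_deadline(now, due, graceperiod=None):
    """Return True if `now` falls after `due` plus any configured grace period."""
    if due is None:
        # A block with no due date never closes.
        return False
    effective_deadline = due + (graceperiod or timedelta(0))
    return now > effective_deadline


# A submission 30 minutes after the due date is still accepted when a
# one-hour grace period is configured; two hours late is rejected.
due = datetime(2030, 1, 15, 12, 0, tzinfo=timezone.utc)
print(is_past_deadline(due + timedelta(minutes=30), due, timedelta(hours=1)))  # False
print(is_past_deadline(due + timedelta(hours=2), due, timedelta(hours=1)))     # True
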
Example #18
class LibraryContentBlock(
    MakoTemplateBlockBase,
    XmlMixin,
    XModuleDescriptorToXBlockMixin,
    XModuleToXBlockMixin,
    HTMLSnippet,
    ResourceTemplates,
    XModuleMixin,
    StudioEditableBlock,
):
    """
    An XBlock whose children are chosen dynamically from a content library.
    Can be used to create randomized assessments among other things.

    Note: technically, all matching blocks from the content library are added
    as children of this block, but only a subset of those children are shown to
    any particular student.
    """
    # pylint: disable=abstract-method
    has_children = True
    has_author_view = True

    resources_dir = 'assets/library_content'

    preview_view_js = {
        'js': [],
        'xmodule_js': resource_string(__name__, 'js/src/xmodule.js'),
    }
    preview_view_css = {
        'scss': [],
    }

    mako_template = 'widgets/metadata-edit.html'
    studio_js_module_name = "VerticalDescriptor"
    studio_view_js = {
        'js': [
            resource_string(__name__, 'js/src/vertical/edit.js'),
        ],
        'xmodule_js': resource_string(__name__, 'js/src/xmodule.js'),
    }
    studio_view_css = {
        'scss': [],
    }

    show_in_read_only_mode = True

    completion_mode = XBlockCompletionMode.AGGREGATOR

    display_name = String(
        display_name=_("Display Name"),
        help=_("The display name for this component."),
        default="Randomized Content Block",
        scope=Scope.settings,
    )
    source_library_id = String(
        display_name=_("Library"),
        help=_("Select the library from which you want to draw content."),
        scope=Scope.settings,
        values_provider=lambda instance: instance.source_library_values(),
    )
    source_library_version = String(
        # This is a hidden field that stores the version of source_library when we last pulled content from it
        display_name=_("Library Version"),
        scope=Scope.settings,
    )
    mode = String(
        display_name=_("Mode"),
        help=_("Determines how content is drawn from the library"),
        default="random",
        values=[
            {"display_name": _("Choose n at random"), "value": "random"}
            # Future addition: Choose a new random set of n every time the student refreshes the block, for self tests
            # Future addition: manually selected blocks
        ],
        scope=Scope.settings,
    )
    max_count = Integer(
        display_name=_("Count"),
        help=_("Enter the number of components to display to each student."),
        default=1,
        scope=Scope.settings,
    )
    capa_type = String(
        display_name=_("Problem Type"),
        help=_('Choose a problem type to fetch from the library. If "Any Type" is selected no filtering is applied.'),
        default=ANY_CAPA_TYPE_VALUE,
        values=_get_capa_types(),
        scope=Scope.settings,
    )
    selected = List(
        # This is a list of (block_type, block_id) tuples used to record
        # which random/first set of matching blocks was selected per user
        default=[],
        scope=Scope.user_state,
    )

    @property
    def source_library_key(self):
        """
        Convenience method to get the library ID as a LibraryLocator and not just a string
        """
        return LibraryLocator.from_string(self.source_library_id)

    @classmethod
    def make_selection(cls, selected, children, max_count, mode):
        """
        Dynamically selects block_ids indicating which of the possible children are displayed to the current user.

        Arguments:
            selected - list of (block_type, block_id) tuples assigned to this student
            children - children of this block
            max_count - number of components to display to each student
            mode - how content is drawn from the library

        Returns:
            A dict containing the following keys:

            'selected' (set) of (block_type, block_id) tuples assigned to this student
            'invalid' (set) of dropped (block_type, block_id) tuples that are no longer valid
            'overlimit' (set) of dropped (block_type, block_id) tuples that were previously selected
            'added' (set) of newly added (block_type, block_id) tuples
        """
        rand = random.Random()

        selected_keys = set(tuple(k) for k in selected)  # set of (block_type, block_id) tuples assigned to this student

        # Determine which of our children we will show:
        valid_block_keys = set((c.block_type, c.block_id) for c in children)

        # Remove any selected blocks that are no longer valid:
        invalid_block_keys = (selected_keys - valid_block_keys)
        if invalid_block_keys:
            selected_keys -= invalid_block_keys

        # If max_count has been decreased, we may have to drop some previously selected blocks:
        overlimit_block_keys = set()
        if len(selected_keys) > max_count:
            num_to_remove = len(selected_keys) - max_count
            overlimit_block_keys = set(rand.sample(selected_keys, num_to_remove))
            selected_keys -= overlimit_block_keys

        # Do we have enough blocks now?
        num_to_add = max_count - len(selected_keys)

        added_block_keys = None
        if num_to_add > 0:
            # We need to select [more] blocks to display to this user:
            pool = valid_block_keys - selected_keys
            if mode == "random":
                num_to_add = min(len(pool), num_to_add)
                added_block_keys = set(rand.sample(pool, num_to_add))
                # We now have the correct n random children to show for this user.
            else:
                raise NotImplementedError("Unsupported mode.")
            selected_keys |= added_block_keys

        if any((invalid_block_keys, overlimit_block_keys, added_block_keys)):
            selected = list(selected_keys)
            random.shuffle(selected)

        return {
            'selected': selected,
            'invalid': invalid_block_keys,
            'overlimit': overlimit_block_keys,
            'added': added_block_keys,
        }

    def _publish_event(self, event_name, result, **kwargs):
        """
        Helper method to publish an event for analytics purposes
        """
        event_data = {
            "location": six.text_type(self.location),
            "result": result,
            "previous_count": getattr(self, "_last_event_result_count", len(self.selected)),
            "max_count": self.max_count,
        }
        event_data.update(kwargs)
        self.runtime.publish(self, "edx.librarycontentblock.content.{}".format(event_name), event_data)
        self._last_event_result_count = len(result)  # pylint: disable=attribute-defined-outside-init

    @classmethod
    def publish_selected_children_events(cls, block_keys, format_block_keys, publish_event):
        """
        Helper method for publishing events when children blocks are
        selected/updated for a user.  This helper is also used by
        the ContentLibraryTransformer.

        Arguments:

            block_keys -
                A dict describing which events to publish (add or
                remove), see `make_selection` above for format details.

            format_block_keys -
                A function to convert block keys to the format expected
                by publish_event. Must have the signature:

                    [(block_type, block_id)] -> T

                Where T is a collection of block keys as accepted by
                `publish_event`.

            publish_event -
                Function that handles the actual publishing.  Must have
                the signature:

                    <'removed'|'assigned'> -> result:T -> removed:T -> reason:str -> None

                Where T is a collection of block_keys as returned by
                `format_block_keys`.
        """
        if block_keys['invalid']:
            # reason "invalid" means deleted from library or a different library is now being used.
            publish_event(
                "removed",
                result=format_block_keys(block_keys['selected']),
                removed=format_block_keys(block_keys['invalid']),
                reason="invalid"
            )

        if block_keys['overlimit']:
            publish_event(
                "removed",
                result=format_block_keys(block_keys['selected']),
                removed=format_block_keys(block_keys['overlimit']),
                reason="overlimit"
            )

        if block_keys['added']:
            publish_event(
                "assigned",
                result=format_block_keys(block_keys['selected']),
                added=format_block_keys(block_keys['added'])
            )

    def selected_children(self):
        """
        Returns a list() of block_ids indicating which of the possible children
        have been selected to display to the current user.

        This reads and updates the "selected" field, which has user_state scope.

        Note: the return value (self.selected) contains block_ids. To get
        actual BlockUsageLocators, it is necessary to use self.children,
        because the block_ids alone do not specify the block type.
        """
        block_keys = self.make_selection(self.selected, self.children, self.max_count, "random")  # pylint: disable=no-member

        # Publish events for analytics purposes:
        lib_tools = self.runtime.service(self, 'library_tools')
        format_block_keys = lambda keys: lib_tools.create_block_analytics_summary(self.location.course_key, keys)
        self.publish_selected_children_events(
            block_keys,
            format_block_keys,
            self._publish_event,
        )

        if any(block_keys[changed] for changed in ('invalid', 'overlimit', 'added')):
            # Save our selections to the user state, to ensure consistency:
            selected = block_keys['selected']
            self.selected = selected  # TODO: this doesn't save from the LMS "Progress" page.

        return self.selected

    def _get_selected_child_blocks(self):
        """
        Generator returning XBlock instances of the children selected for the
        current user.
        """
        for block_type, block_id in self.selected_children():
            yield self.runtime.get_block(self.location.course_key.make_usage_key(block_type, block_id))

    def student_view(self, context):  # lint-amnesty, pylint: disable=missing-function-docstring
        fragment = Fragment()
        contents = []
        child_context = {} if not context else copy(context)

        for child in self._get_selected_child_blocks():
            if child is None:
                # TODO: Fix the underlying issue in TNL-7424
                # This shouldn't be happening, but does for an as-of-now
                # unknown reason. Until we address the underlying issue,
                # let's at least log the error explicitly, ignore the
                # exception, and prevent the page from resulting in a
                # 500-response.
                logger.error('Skipping display for child block that is None')
                continue
            for displayable in child.displayable_items():
                rendered_child = displayable.render(STUDENT_VIEW, child_context)
                fragment.add_fragment_resources(rendered_child)
                contents.append({
                    'id': text_type(displayable.location),
                    'content': rendered_child.content,
                })

        fragment.add_content(self.system.render_template('vert_module.html', {
            'items': contents,
            'xblock_context': context,
            'show_bookmark_button': False,
            'watched_completable_blocks': set(),
            'completion_delay_ms': None,
        }))
        return fragment

    def author_view(self, context):
        """
        Renders the Studio views.
        Normal studio view: If block is properly configured, displays library status summary
        Studio container view: displays a preview of all possible children.
        """
        fragment = Fragment()
        root_xblock = context.get('root_xblock')
        is_root = root_xblock and root_xblock.location == self.location

        if is_root:
            # User has clicked the "View" link. Show a preview of all possible children:
            if self.children:  # pylint: disable=no-member
                fragment.add_content(self.system.render_template("library-block-author-preview-header.html", {
                    'max_count': self.max_count,
                    'display_name': self.display_name or self.url_name,
                }))
                context['can_edit_visibility'] = False
                context['can_move'] = False
                self.render_children(context, fragment, can_reorder=False, can_add=False)
        # else: When shown on a unit page, don't show any sort of preview -
        # just the status of this block in the validation area.

        # The following JS is used to make the "Update now" button work on the unit page and the container view:
        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/library_content_edit.js'))
        fragment.initialize_js('LibraryContentAuthorView')
        return fragment

    def studio_view(self, _context):
        """
        Return the studio view.
        """
        fragment = Fragment(
            self.system.render_template(self.mako_template, self.get_context())
        )
        add_webpack_to_fragment(fragment, 'LibraryContentBlockStudio')
        shim_xmodule_js(fragment, self.studio_js_module_name)
        return fragment

    def get_child_descriptors(self):
        """
        Return only the subset of our children relevant to the current student.
        """
        return list(self._get_selected_child_blocks())

    @property
    def non_editable_metadata_fields(self):
        non_editable_fields = super().non_editable_metadata_fields
        # The only supported mode is currently 'random'.
        # Add the mode field to non_editable_metadata_fields so that it doesn't
        # render in the edit form.
        non_editable_fields.extend([
            LibraryContentBlock.mode,
            LibraryContentBlock.source_library_version,
        ])
        return non_editable_fields

    @lazy
    def tools(self):
        """
        Grab the library tools service or raise an error.
        """
        return self.runtime.service(self, 'library_tools')

    def get_user_id(self):
        """
        Get the ID of the current user.
        """
        user_service = self.runtime.service(self, 'user')
        if user_service:
            # May be None when creating bok choy test fixtures
            user_id = user_service.get_current_user().opt_attrs.get('edx-platform.user_id', None)
        else:
            user_id = None
        return user_id

    @XBlock.handler
    def refresh_children(self, request=None, suffix=None):  # lint-amnesty, pylint: disable=unused-argument
        """
        Refresh children:
        This method is to be used when any of the libraries that this block
        references have been updated. It will re-fetch all matching blocks from
        the libraries, and copy them as children of this block. The children
        will be given new block_ids, but the definition ID used should be the
        exact same definition ID used in the library.

        This method will update this block's 'source_library_version' field to store
        the version number of the libraries used, so we can easily determine whether
        this block is up to date or not.
        """
        user_perms = self.runtime.service(self, 'studio_user_permissions')
        if not self.tools:
            return Response("Library Tools unavailable in current runtime.", status=400)
        self.tools.update_children(self, user_perms)
        return Response()

    # Copy over any overridden settings the course author may have applied to the blocks.
    def _copy_overrides(self, store, user_id, source, dest):
        """
        Copy any overrides the user has made on blocks in this library.
        """
        for field in six.itervalues(source.fields):
            if field.scope == Scope.settings and field.is_set_on(source):
                setattr(dest, field.name, field.read_from(source))
        if source.has_children:
            source_children = [self.runtime.get_block(source_key) for source_key in source.children]
            dest_children = [self.runtime.get_block(dest_key) for dest_key in dest.children]
            for source_child, dest_child in zip(source_children, dest_children):
                self._copy_overrides(store, user_id, source_child, dest_child)
        store.update_item(dest, user_id)

    def studio_post_duplicate(self, store, source_block):
        """
        Used by the studio after basic duplication of a source block. We handle the children
        ourselves, because we have to properly reference the library upstream and set the overrides.

        Otherwise we'll end up losing data on the next refresh.
        """
        # The first task will be to refresh our copy of the library to generate the children.
        # We must do this at the currently set version of the library block. Otherwise we may not have
        # exactly the same children-- someone may be duplicating an out of date block, after all.
        user_id = self.get_user_id()
        user_perms = self.runtime.service(self, 'studio_user_permissions')
        if not self.tools:
            raise RuntimeError("Library tools unavailable, duplication will not be sane!")
        self.tools.update_children(self, user_perms, version=self.source_library_version)

        self._copy_overrides(store, user_id, source_block, self)

        # Children have been handled.
        return True

    def _validate_library_version(self, validation, lib_tools, version, library_key):
        """
        Validates library version
        """
        latest_version = lib_tools.get_library_version(library_key)
        if latest_version is not None:
            if version is None or version != six.text_type(latest_version):
                validation.set_summary(
                    StudioValidationMessage(
                        StudioValidationMessage.WARNING,
                        _(u'This component is out of date. The library has new content.'),
                        # TODO: change this to action_runtime_event='...' once the unit page supports that feature.
                        # See https://openedx.atlassian.net/browse/TNL-993
                        action_class='library-update-btn',
                        # Translators: {refresh_icon} placeholder is substituted to "↻" (without double quotes)
                        action_label=_(u"{refresh_icon} Update now.").format(refresh_icon=u"↻")
                    )
                )
                return False
        else:
            validation.set_summary(
                StudioValidationMessage(
                    StudioValidationMessage.ERROR,
                    _(u'Library is invalid, corrupt, or has been deleted.'),
                    action_class='edit-button',
                    action_label=_(u"Edit Library List.")
                )
            )
            return False
        return True

    def _set_validation_error_if_empty(self, validation, summary):
        """  Helper method to only set validation summary if it's empty """
        if validation.empty:
            validation.set_summary(summary)

    def validate(self):
        """
        Validates the state of this Library Content Module Instance. This
        is the override of the general XBlock method, and it will also ask
        its superclass to validate.
        """
        validation = super().validate()
        if not isinstance(validation, StudioValidation):
            validation = StudioValidation.copy(validation)
        library_tools = self.runtime.service(self, "library_tools")
        if not (library_tools and library_tools.can_use_library_content(self)):
            validation.set_summary(
                StudioValidationMessage(
                    StudioValidationMessage.ERROR,
                    _(
                        u"This course does not support content libraries. "
                        u"Contact your system administrator for more information."
                    )
                )
            )
            return validation
        if not self.source_library_id:
            validation.set_summary(
                StudioValidationMessage(
                    StudioValidationMessage.NOT_CONFIGURED,
                    _(u"A library has not yet been selected."),
                    action_class='edit-button',
                    action_label=_(u"Select a Library.")
                )
            )
            return validation
        lib_tools = self.runtime.service(self, 'library_tools')
        self._validate_library_version(validation, lib_tools, self.source_library_version, self.source_library_key)

        # Note: we assume refresh_children() has been called
        # since the last time fields like source_library_id or capa_types were changed.
        matching_children_count = len(self.children)  # pylint: disable=no-member
        if matching_children_count == 0:
            self._set_validation_error_if_empty(
                validation,
                StudioValidationMessage(
                    StudioValidationMessage.WARNING,
                    _(u'There are no matching problem types in the specified libraries.'),
                    action_class='edit-button',
                    action_label=_(u"Select another problem type.")
                )
            )

        if matching_children_count < self.max_count:
            self._set_validation_error_if_empty(
                validation,
                StudioValidationMessage(
                    StudioValidationMessage.WARNING,
                    (
                        ngettext(
                            u'The specified library is configured to fetch {count} problem, ',
                            u'The specified library is configured to fetch {count} problems, ',
                            self.max_count
                        ) +
                        ngettext(
                            u'but there is only {actual} matching problem.',
                            u'but there are only {actual} matching problems.',
                            matching_children_count
                        )
                    ).format(count=self.max_count, actual=matching_children_count),
                    action_class='edit-button',
                    action_label=_(u"Edit the library configuration.")
                )
            )

        return validation

    def source_library_values(self):
        """
        Return a list of possible values for self.source_library_id
        """
        lib_tools = self.runtime.service(self, 'library_tools')
        user_perms = self.runtime.service(self, 'studio_user_permissions')
        all_libraries = [
            (key, bleach.clean(name)) for key, name in lib_tools.list_available_libraries()
            if user_perms.can_read(key) or self.source_library_id == six.text_type(key)
        ]
        all_libraries.sort(key=lambda entry: entry[1])  # Sort by name
        if self.source_library_id and self.source_library_key not in [entry[0] for entry in all_libraries]:
            all_libraries.append((self.source_library_id, _(u"Invalid Library")))
        all_libraries = [(u"", _("No Library Selected"))] + all_libraries
        values = [{"display_name": name, "value": six.text_type(key)} for key, name in all_libraries]
        return values

    def editor_saved(self, user, old_metadata, old_content):  # lint-amnesty, pylint: disable=unused-argument
        """
        If source_library_id or capa_type has been edited, refresh_children automatically.
        """
        old_source_library_id = old_metadata.get('source_library_id', [])
        if (old_source_library_id != self.source_library_id or
                old_metadata.get('capa_type', ANY_CAPA_TYPE_VALUE) != self.capa_type):
            try:
                self.refresh_children()
            except ValueError:
                pass  # The validation area will display an error message, no need to do anything now.

    def has_dynamic_children(self):
        """
        Inform the runtime that our children vary per-user.
        See get_child_descriptors() above
        """
        return True

    def get_content_titles(self):
        """
        Returns a list of friendly titles for our selected children only; without
        this, all possible children's titles would be seen in the sequence bar in
        the LMS.

        This overrides the get_content_titles method included in x_module by default.
        """
        titles = []
        for child in self.get_child_descriptors():
            titles.extend(child.get_content_titles())
        return titles

    @classmethod
    def definition_from_xml(cls, xml_object, system):
        children = [
            system.process_xml(etree.tostring(child)).scope_ids.usage_id
            for child in xml_object.getchildren()
        ]
        definition = {
            attr_name: json.loads(attr_value)
            for attr_name, attr_value in xml_object.attrib.items()
        }
        return definition, children

    def definition_to_xml(self, resource_fs):
        """ Exports Library Content Module to XML """
        xml_object = etree.Element('library_content')
        for child in self.get_children():
            self.runtime.add_block_as_child_node(child, xml_object)
        # Set node attributes based on our fields.
        for field_name, field in six.iteritems(self.fields):
            if field_name in ('children', 'parent', 'content'):
                continue
            if field.is_set_on(self):
                xml_object.set(field_name, six.text_type(field.read_from(self)))
        return xml_object
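
The selection behaviour of make_selection() above can be illustrated with a condensed, self-contained sketch that works on plain (block_type, block_id) tuples instead of real child references. The function below is an illustration of the algorithm only, not the platform code; sorting before sampling is just there to keep the sketch runnable and reproducible on modern Python:

import random


def pick_children(selected, valid, max_count, rng=None):
    """Reconcile a student's previous selection with the currently valid children."""
    rng = rng or random.Random()
    selected = set(selected)
    valid = set(valid)

    invalid = selected - valid          # dropped: no longer in the library
    selected -= invalid

    overlimit = set()
    if len(selected) > max_count:       # dropped: max_count was lowered
        overlimit = set(rng.sample(sorted(selected), len(selected) - max_count))
        selected -= overlimit

    added = set()
    if len(selected) < max_count:       # fill back up to max_count from the pool
        pool = sorted(valid - selected)
        added = set(rng.sample(pool, min(len(pool), max_count - len(selected))))
        selected |= added

    return {'selected': selected, 'invalid': invalid, 'overlimit': overlimit, 'added': added}


# One previously selected block was deleted from the library, so a
# replacement is drawn from the remaining candidates.
result = pick_children(
    selected=[('problem', 'p1'), ('problem', 'gone')],
    valid=[('problem', 'p1'), ('problem', 'p2'), ('problem', 'p3')],
    max_count=2,
    rng=random.Random(42),
)
print(result['invalid'])        # {('problem', 'gone')}
print(len(result['selected']))  # 2
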
class LibraryContentFields(object):
    """
    Fields for the LibraryContentModule.

    Separated out for now because they need to be added to the module and the
    descriptor.
    """
    # Please note the display_name of each field below is used in
    # common/test/acceptance/pages/studio/library.py:StudioLibraryContentXBlockEditModal
    # to locate input elements - keep synchronized
    display_name = String(
        display_name=_("Display Name"),
        help=_("The display name for this component."),
        default="Randomized Content Block",
        scope=Scope.settings,
    )
    source_library_id = String(
        display_name=_("Library"),
        help=_("Select the library from which you want to draw content."),
        scope=Scope.settings,
        values_provider=lambda instance: instance.source_library_values(),
    )
    source_library_version = String(
        # This is a hidden field that stores the version of source_library when we last pulled content from it
        display_name=_("Library Version"),
        scope=Scope.settings,
    )
    mode = String(
        display_name=_("Mode"),
        help=_("Determines how content is drawn from the library"),
        default="random",
        values=
        [{
            "display_name": _("Choose n at random"),
            "value": "random"
        }
         # Future addition: Choose a new random set of n every time the student refreshes the block, for self tests
         # Future addition: manually selected blocks
         ],
        scope=Scope.settings,
    )
    max_count = Integer(
        display_name=_("Count"),
        help=_("Enter the number of components to display to each student."),
        default=1,
        scope=Scope.settings,
    )
    capa_type = String(
        display_name=_("Problem Type"),
        help=
        _('Choose a problem type to fetch from the library. If "Any Type" is selected no filtering is applied.'
          ),
        default=ANY_CAPA_TYPE_VALUE,
        values=_get_capa_types(),
        scope=Scope.settings,
    )
    selected = List(
        # This is a list of (block_type, block_id) tuples used to record
        # which random/first set of matching blocks was selected per user
        default=[],
        scope=Scope.user_state,
    )
    has_children = True

    @property
    def source_library_key(self):
        """
        Convenience method to get the library ID as a LibraryLocator and not just a string
        """
        return LibraryLocator.from_string(self.source_library_id)
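
The source_library_key property above relies on LibraryLocator.from_string() to turn the stored id into a typed key. A quick round-trip sketch, assuming the opaque-keys package that provides LibraryLocator and InvalidKeyError is installed (the key string itself is made up for illustration):

from opaque_keys import InvalidKeyError
from opaque_keys.edx.locator import LibraryLocator

raw_id = "library-v1:SampleOrg+SampleLib"
try:
    key = LibraryLocator.from_string(raw_id)
    print(str(key) == raw_id)  # the locator serializes back to the same string
except InvalidKeyError:
    print("not a valid library id")
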
Example #20
class InheritanceMixin(XBlockMixin):
    """Field definitions for inheritable fields."""

    graded = Boolean(
        help="Whether this module contributes to the final course grade",
        scope=Scope.settings,
        default=False,
    )
    start = Date(help="Start time when this module is visible",
                 default=datetime(2030, 1, 1, tzinfo=UTC),
                 scope=Scope.settings)
    due = Date(
        help="Date that this problem is due by",
        scope=Scope.settings,
    )
    extended_due = Date(
        help="Date that this problem is due by for a particular student. This "
        "can be set by an instructor, and will override the global due "
        "date if it is set to a date that is later than the global due "
        "date.",
        default=None,
        scope=Scope.user_state,
    )
    course_edit_method = String(
        help="Method with which this course is edited.",
        default="Studio",
        scope=Scope.settings)
    giturl = String(
        help="url root for course data git repository",
        scope=Scope.settings,
    )
    xqa_key = String(help="DO NOT USE", scope=Scope.settings)
    annotation_storage_url = String(
        help="Location of Annotation backend",
        scope=Scope.settings,
        default="http://your_annotation_storage.com",
        display_name="Url for Annotation Storage")
    annotation_token_secret = String(
        help="Secret string for annotation storage",
        scope=Scope.settings,
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        display_name="Secret Token String for Annotation")
    graceperiod = Timedelta(
        help=
        "Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings,
    )
    showanswer = String(
        help="When to show the problem answer to the student",
        scope=Scope.settings,
        default="finished",
    )
    rerandomize = String(
        help="When to rerandomize the problem",
        scope=Scope.settings,
        default="never",
    )
    days_early_for_beta = Float(
        help="Number of days early to show content to beta users",
        scope=Scope.settings,
        default=None,
    )
    static_asset_path = String(
        help="Path to use for static assets - overrides Studio c4x://",
        scope=Scope.settings,
        default='',
    )
    text_customization = Dict(
        help="String customization substitutions for particular locations",
        scope=Scope.settings,
    )
    use_latex_compiler = Boolean(help="Enable LaTeX templates?",
                                 default=False,
                                 scope=Scope.settings)
    max_attempts = Integer(
        display_name="Maximum Attempts",
        help=
        ("Defines the number of times a student can try to answer this problem. "
         "If the value is not set, infinite attempts are allowed."),
        values={"min": 0},
        scope=Scope.settings)
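
The extended_due field above is a per-student override that only takes effect when it is later than the global due date. A minimal sketch of that resolution (the helper name is an illustrative assumption, not part of the mixin):

from datetime import datetime, timezone


def effective_due_date(due, extended_due=None):
    """Return the later of the global due date and a per-student extension, if any."""
    if due is None:
        return extended_due
    if extended_due is None:
        return due
    return max(due, extended_due)


global_due = datetime(2030, 3, 1, tzinfo=timezone.utc)
extension = datetime(2030, 3, 8, tzinfo=timezone.utc)
print(effective_due_date(global_due, extension))  # 2030-03-08 00:00:00+00:00
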
Example #21
class MultiChoiceXBlock(XBlock):
    """ Studio data """

    confidenceLevels = Dict(
        default={
            'low': {
                'correct': 1,
                'wrong': 0
            },
            'normal': {
                'correct': 1.5,
                'wrong': -0.5
            },
            'high': {
                'correct': 2,
                'wrong': -1
            }
        },
        scope=Scope.content,
    )
    """
    The confidence level for each question, ranging from low to high.
    The points awarded range from 1 (correct) and 0 (wrong) at low confidence
    up to 2 (correct) and -1 (wrong) at high confidence.
    The score received is based on whether or not the selected answer(s)
    were correct. Used in ```CalculateGrade```

    See:
      |  ```CalculateGrade```
    """

    grade_dictionary = Dict(
        default={
            'gradeA': {
                'grade': 'A',
                'score': 90
            },
            'gradeB': {
                'grade': 'B',
                'score': 80
            },
            'gradeC': {
                'grade': 'C',
                'score': 50
            },
            'gradeD': {
                'grade': 'D',
                'score': 40
            },
            'gradeE': {
                'grade': 'E',
                'score': 35
            },
            'gradeF': {
                'grade': 'F',
                'score': 0
            },
        },
        scope=Scope.content,
    )
    """
    Dictionary containing the grades available, from A - F,
    with the relevant score for each grade. Used in ```CalculateGrade```

    See:
      |  ```CalculateGrade```
    """

    student_answer_dictionary = []
    """
    This list contains the current student's answers to the given questionnaire.
    For the grade to be calculated properly, it expects objects of the class ```SubmittedAnswers```

    See:
      |  ```SubmittedAnswers```
      |  ```CalculateGrade```
    """

    bonus_point = 1.0
    """
    Bonus point given to each student for each successful guess on difficulty level questions
    """

    # test list for grading difficulty levels;
    # Filled with dummy data in the function:
    # grade_students_difficulty_level()
    # Don't remove without asking
    # ~LUCAS
    submitted_student_answers = []

    title = String(
        default="",
        scope=Scope.content,
    )

    description = String(
        default="",
        scope=Scope.content,
    )

    questions_json_list = List(
        default=[
            # {
            #     'id': 0,
            #     'question': 'Choose A, B or C',
            #     'alternatives': [{
            #         'id': '0',
            #         'text': 'A',
            #         'isCorrect': True
            #     }, {
            #         'id': '1',
            #         'text': 'B',
            #         'isCorrect': False
            #     }, {
            #         'id': '2',
            #         'text': 'C',
            #         'isCorrect': False
            #     }],
            #     'has_difficulty_level': True
            # },
            # {
            #     'id': 1,
            #     'question': 'Choose D, E or F',
            #     'alternatives': [{
            #         'id': '0',
            #         'text': 'D',
            #         'isCorrect': True
            #     }, {
            #         'id': '1',
            #         'text': 'E',
            #         'isCorrect': False
            #     }, {
            #         'id': '2',
            #         'text': 'F',
            #         'isCorrect': False
            #     }],
            #     'has_difficulty_level': False
            # }
        ],
        scope=Scope.content,
    )

    question_objects_list = List(
        default=[],
        scope=Scope.content,
    )

    maxScore = Integer(
        default=0,
        scope=Scope.content,
    )

    student_name = "Lars"
    student_id = Integer(
        default=123,
        scope=Scope.content,
    )

    #student_answerArray = [1, 1, 2]

    students = [{
        "ID": "1",
        "Firstname": "Trym",
        "Lastname": "Hansen",
        "Grade": "B"
    }, {
        "ID": "2",
        "Lastname": "Marlie",
        "Firstname": "Hella",
        "Grade": "D"
    }]

    # Dummy array of answers, based on the questions available as of Dec 1
    students_sub = [
        {
            "id": "1",  #Identifies student
            "questions": {  #List of alternatives chosen by student.
                1:
                1,  #Question number and the corresponding chosen alternative
                2: 3
            },
            "confidence": {
                0: 0,  # not confident
                1: 2  # I'm certain
            },
            "difficulty": {
                0: 1  #above average
            }
        },
        {
            "id": "2",
            "questions": {
                1: 3,
                2: 1
            },
            "confidence": {
                0: 1,
                1: 0
            },
            "difficulty": {
                0: 0
            }
        }
    ]

    student_ans = [[1, 1, 1, 1], [2, 2, 2, 2]]
    ''' Student data '''
    student_answers = list()

    score = Integer(default=0, scope=Scope.user_state)

    has_score = False

    questionInterface = None

    def __init__(self, *args, **kwargs):
        # Use MultiChoiceXBlock in the super() call so that XBlock.__init__ runs;
        # super(XBlock, self) would skip it.
        super(MultiChoiceXBlock, self).__init__(*args, **kwargs)
        if self.questions_json_list is None or len(self.questions_json_list) == 0:
            self.questions_json_list = self.get_default_questions_json()
            # if self.question_objects_list is None or len(self.question_objects_list) is 0:
            #     self.question_objects_list = self.get_default_questions_json()
            # self.xmodule_runtime = self.runtime
            # self.questionController = QuestionController(self)

    ''' Views '''

    def studio_view(self, context=None):
        tpl = Template(
            filename="multichoice/multichoice/static/html/manage_questions.html"
        )
        buf = StringIO()
        ctx = Context(buf, xblock=self)
        tpl.render_context(ctx)

        frag = Fragment(buf.getvalue())
        frag.add_css(self.resource_string("static/css/multichoice.css"))
        frag.add_css(self.resource_string("static/css/font-awesome.min.css"))
        frag.add_javascript(
            self.resource_string("static/js/src/manage_questions.js"))
        frag.initialize_js('MultiChoiceXBlock')
        return frag

    def student_view(self, context=None):
        tpl = Template(
            filename="multichoice/multichoice/static/html/student_view.html")
        buf = StringIO()
        ctx = Context(buf, xblock=self)
        tpl.render_context(ctx)

        frag = Fragment(buf.getvalue())
        frag.add_css(self.resource_string("static/css/student_view.css"))
        frag.add_javascript(
            self.resource_string("static/js/src/student_view.js"))
        frag.initialize_js('AnswerXBlock')
        return frag

    def author_view(self, context=None):
        tpl = Template(
            filename=
            "multichoice/multichoice/static/html/review_stud_quest.html")
        buf = StringIO()
        ctx = Context(buf, xblock=self)
        tpl.render_context(ctx)

        frag = Fragment(buf.getvalue())
        frag.add_css(self.resource_string("static/css/multichoice.css"))
        frag.add_css(self.resource_string("static/css/font-awesome.min.css"))
        frag.add_javascript(
            self.resource_string("static/js/src/questionnaire_review.js"))
        frag.initialize_js('MultiChoiceXBlock')
        return frag

    ''' JSON handler methods '''

    @XBlock.json_handler  #Returns array with all students that have submitted answers:
    def get_answers(self, data, suffix=''):
        return self.students_sub

    @XBlock.json_handler
    def save_student_answers(self, data, suffix=''):
        """
        Saves student answers passed from the student view,
        and returns a dictionary with correctness of the answers back to student

        Arguments:
            data: a dictionary that contains question ID, alternatives chosen by the student,
                and confidence level of the student.
                data =  {
                            questionId: {
                                chosen: [1, 2],
                                confidence: 'High'
                            }
                        }
        Returns:
            dict: a dictionary containing answer ID and corresponding correctness.
                dict =  {
                            1: 'true',
                            2: 'false'
                        }
        """

        self.student_answers.append(data)
        return_data = {}
        # print data['question_id']
        print(self.questions_json_list)
        # print "Q_ID: " + data['question_id']
        for answer_id in data['chosen']:
            answer_obj = SubmittedAnswer(int(data['question_id']),
                                         int(answer_id), data['confidence'])
            self.student_answer_dictionary.append(answer_obj)
            if self._is_answer_correct(int(answer_id),
                                       int(data['question_id'])):
                return_data[answer_id] = 'true'
            else:
                return_data[answer_id] = 'false'
        return return_data

    def _is_answer_correct(self, answer_id, question_id):
        """
        Looks for the answer in the questions list and returns the correctness value of the answer.

        Arguments:
            answer_id (int): ID of the chosen alternative.
            question_id (int): ID of the question the alternative belongs to.

        Returns:
            bool: correctness value for a particular answer.
        """
        try:
            for question in self.questions_json_list:
                print(question)
                print("Q_ID: " + str(question['id']))
                print("PASSED Q_ID: " + str(question_id))
                if question['id'] == question_id:
                    print(True)
                    for alternative in question['alternatives']:
                        if alternative['id'] == answer_id:
                            return alternative['isCorrect']

            # for alternative in self.questions_json_list[question_id]['alternatives']:
            #     print self.questions_json_list[question_id]['id']
            #     print alternative
            #     if alternative['id'] == answer_id:
            #         return alternative['isCorrect']
        except Exception as ex:
            print(ex)

    @XBlock.json_handler
    def get_grade(self, data, suffix=''):
        """
        Retrieves the score and grade for the questionnaire based
        on the student's submitted answers. This is achieved by calling
        the class ``CalculateGrade`` and the function ``__unicode__``,
        which returns a string with the score and grade.

        Arguments:
            data:
            suffix:

        See:
            ``CalculateGrade.__unicode__``

        Returns:
            object: JSON object containing the string with the score and grade

        """
        grade = ''
        try:
            question_list = self.create_object_from_json()
            total_score = len(question_list)
            calc_grade = CalculateGrade(self, total_score, question_list)
            # This is for debugging, in case it does not work
            # (checks if dictionaries has content)
            # grade = calc_grade.check_if_lists_are_set()
            calc_grade.calculate_grade()
            # print the grade
            grade += calc_grade.__unicode__()
        except Exception as ex:
            grade += "<p>An exception occurred: " + str(ex) + ". "
            grade += "Failed at calculating grade."
        return {'grade': grade, 'answers': data}
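
CalculateGrade itself is not shown in this example, but grade_dictionary is shaped as a letter-to-threshold table. The standalone sketch below shows how such a table could be turned into a letter grade; it is an illustration only, not the actual CalculateGrade logic:

def letter_grade(percent_score, grade_table):
    """Return the grade of the highest score threshold the student has reached."""
    # Check thresholds from highest to lowest and take the first one met.
    for entry in sorted(grade_table.values(), key=lambda e: e['score'], reverse=True):
        if percent_score >= entry['score']:
            return entry['grade']
    return 'F'


grade_table = {
    'gradeA': {'grade': 'A', 'score': 90},
    'gradeB': {'grade': 'B', 'score': 80},
    'gradeC': {'grade': 'C', 'score': 50},
    'gradeD': {'grade': 'D', 'score': 40},
    'gradeE': {'grade': 'E', 'score': 35},
    'gradeF': {'grade': 'F', 'score': 0},
}
print(letter_grade(83, grade_table))  # B
print(letter_grade(10, grade_table))  # F
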
Example #22
class PureXBlock(XBlock):
    """Class for testing pure XBlocks."""

    has_children = True
    field1 = String(default="something", scope=Scope.user_state)
    field2 = Integer(scope=Scope.user_state)
Example #23
class QuestionGeneratorXBlock(XBlock, SubmittingXBlockMixin,
                              StudioEditableXBlockMixin):
    """
    Question Generator XBlock
    """

    display_name = String(
        display_name="Question Generator XBlock",
        help=
        "This name appears in the horizontal navigation at the top of the page.",
        scope=Scope.settings,
        default="Question Generator XBlock")

    max_attempts = Integer(
        display_name="Maximum Attempts",
        help=
        "Defines the number of times a student can try to answer this problem.",
        default=1,
        values={"min": 1},
        scope=Scope.settings)

    max_points = Integer(
        display_name="Possible points",
        help="Defines the maximum points that the learner can earn.",
        default=1,
        scope=Scope.settings)

    show_points_earned = Boolean(display_name="Shows points earned",
                                 help="Shows points earned",
                                 default=True,
                                 scope=Scope.settings)

    show_submission_times = Boolean(display_name="Shows submission times",
                                    help="Shows submission times",
                                    default=True,
                                    scope=Scope.settings)

    show_answer = Boolean(
        display_name="Show Answer",
        help="Defines when to show the 'Show/Hide Answer' button",
        default=True,
        scope=Scope.settings)

    xblock_id = None
    newly_created_block = True

    image_url = "http://edx4.vietnamx.org:18010/asset-v1:IU+DemoX+2017_04+type@asset+block@images_logic_gate_sln.png"
    question_template = ""
    variables = {}
    answer_template = ""

    generated_question = ""
    generated_variables = {}

    student_answer = ""

    attempt_number = 0

    editable_fields = ('display_name', 'max_attempts', 'max_points',
                       'show_points_earned', 'show_submission_times',
                       'show_answer')

    has_score = True

    matlab_server_url = "172.18.10.33:8080"  # TODO allows user to config MATLAB URL in studio
    matlab_solver_url = "/solve"  # TODO allows user to config MATLAB URL in studio

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def student_view(self, context=None):
        """
        The primary view of the QuestionGeneratorXBlock, shown to students when viewing courses.
        """

        context = {}

        if self.xblock_id is None:
            self.xblock_id = unicode(
                self.location.replace(branch=None, version=None))

        if self.newly_created_block:
            self.newly_created_block = (qgb_db_service.is_block_in_db(
                self.xblock_id) is False)

        if (self.newly_created_block is
                True):  # generate question template for newly created XBlock
            self.question_template, self.variables, self.answer_template = qgb_question_service.generate_question_template(
            )
            qgb_db_service.create_question_template(self.xblock_id,
                                                    self.question_template,
                                                    self.variables,
                                                    self.answer_template)
            self.newly_created_block = False
        else:  # existing question template in dbms
            self.load_data_from_dbms()

        # generate question from template if necessary
        if (self.generated_question == ""):
            self.generated_question, self.generated_variables = qgb_question_service.generate_question(
                self.question_template, self.variables)

        # load submission data to display the previously submitted result
        submissions = sub_api.get_submissions(self.student_item_key, 1)
        if submissions:
            latest_submission = submissions[0]

            # parse the answer
            answer = latest_submission['answer']  # saved "answer information"
            self.generated_question = answer['generated_question']
            #            self.generated_answer = answer['generated_answer']  # teacher's generated answer
            self.student_answer = answer[
                'student_answer']  # student's submitted answer

            if ('variable_values' in answer):  # backward compatibility
                saved_generated_variables = json.loads(
                    answer['variable_values'])
                for var_name, var_value in saved_generated_variables.iteritems(
                ):
                    self.generated_variables[var_name] = var_value

            self.attempt_number = latest_submission['attempt_number']
            if (self.attempt_number >= self.max_attempts):
                context['disabled'] = 'disabled'
            else:
                context['disabled'] = ''

        self.serialize_data_to_context(context)

        context['image_url'] = self.image_url
        context['student_answer'] = self.student_answer
        context['attempt_number'] = self.attempt_number_string
        context['point_string'] = self.point_string
        context['question'] = self.generated_question
        context['xblock_id'] = self.xblock_id
        context['show_answer'] = self.show_answer

        frag = Fragment()
        frag.content = loader.render_template(
            'static/html/question_generator_block.html', context)
        frag.add_css(
            self.resource_string("static/css/question_generator_block.css"))
        frag.add_javascript(
            self.resource_string("static/js/src/question_generator_block.js"))
        frag.initialize_js('QuestionGeneratorXBlock')
        return frag

    def studio_view(self, context):
        """
        Render a form for editing this XBlock (override the StudioEditableXBlockMixin's method)
        """

        # if the XBlock has been submitted already then disable the studio_edit screen
        location = self.location.replace(
            branch=None,
            version=None)  # Standardize the key in case it isn't already
        item_id = unicode(location)

        # Student not yet submit then we can edit the XBlock
        fragment = Fragment()
        context = {'fields': []}
        # Build a list of all the fields that can be edited:
        for field_name in self.editable_fields:
            field = self.fields[field_name]
            assert field.scope in (Scope.content, Scope.settings), (
                "Only Scope.content or Scope.settings fields can be used with "
                "StudioEditableXBlockMixin. Other scopes are for user-specific data and are "
                "not generally created/configured by content authors in Studio."
            )
            field_info = self._make_field_info(field_name, field)
            if field_info is not None:
                context["fields"].append(field_info)

        # (re-)fetch data from the database
        self.load_data_from_dbms()
        # self.serialize_data_to_context(context) ??? REMOVE not necessary, remove
        context['image_url'] = self.image_url
        context['question_template'] = self.question_template
        context["variables"] = self.variables
        context['answer_template'] = self.answer_template

        if qgb_db_service.is_xblock_submitted(item_id):
            context['is_submitted'] = 'True'
        else:
            context['is_submitted'] = 'False'

        fragment.content = loader.render_template(
            'static/html/question_generator_studio_edit.html', context)
        fragment.add_css(
            self.resource_string(
                "static/css/question_generator_block_studio_edit.css"))
        fragment.add_javascript(
            loader.load_unicode(
                'static/js/src/question_generator_studio_edit.js'))
        fragment.initialize_js('StudioEditableXBlockMixin')
        return fragment

    def serialize_data_to_context(self, context):
        """
        Save data to context to re-use later to avoid re-accessing the DBMS
        """
        context['saved_question_template'] = self.question_template
        context['saved_answer_template'] = self.answer_template
        context['serialized_variables'] = json.dumps(self.variables)
        context['serialized_generated_variables'] = json.dumps(
            self.generated_variables)

    def deserialize_data_from_context(self, context):
        """
        De-serialize data previously saved to context
        """
        self.question_template = context['saved_question_template']
        self.answer_template = context['saved_answer_template']
        self.variables = json.loads(context['serialized_variables'])
        self.generated_variables = json.loads(
            context['serialized_generated_variables'])

    def load_data_from_dbms(self):
        """
        Load question template data from MySQL
        """

        if self.xblock_id is None:
            self.xblock_id = unicode(
                self.location.replace(branch=None, version=None))

        self.question_template, self.variables, self.answer_template = qgb_db_service.fetch_question_template_data(
            self.xblock_id)

    @XBlock.json_handler
    def student_submit(self, data, suffix=''):
        """
        AJAX handler for Submit button
        """

        self.deserialize_data_from_context(data)

        points_earned = 0

        # TODO generate the teacher's answer

        generated_answer = qgb_question_service.generate_answer(
            self.generated_variables, self.answer_template)
        student_answer = data['student_answer']

        # save the submission
        submission_data = {
            'generated_question': data['saved_generated_question'],
            'student_answer': student_answer,
            'generated_answer': generated_answer,
            'variable_values': data['serialized_generated_variables']
        }

        # call matlab
        evaluation_result = matlab_service.evaluate_matlab_answer(
            self.matlab_server_url, self.matlab_solver_url, generated_answer,
            student_answer)
        if evaluation_result:
            points_earned = self.max_points

        submission = sub_api.create_submission(self.student_item_key,
                                               submission_data)
        sub_api.set_score(submission['uuid'], points_earned, self.max_points)

        submit_result = {}
        submit_result['point_string'] = self.point_string

        # disable the "Submit" button once the number of submission attempts reaches max_attempts
        self.attempt_number = submission['attempt_number']
        submit_result['attempt_number'] = self.attempt_number_string
        if self.attempt_number >= self.max_attempts:
            submit_result['submit_disabled'] = 'disabled'
        else:
            submit_result['submit_disabled'] = ''

        return submit_result

    @XBlock.json_handler
    def fe_submit_studio_edits(self, data, suffix=''):
        """
        AJAX handler for studio edit submission
        """

        if self.xblock_id is None:
            self.xblock_id = unicode(
                self.location.replace(branch=None, version=None))

        updated_question_template = data['question_template']
        updated_variables = data['variables']
        updated_answer_template = data['answer_template']

        qgb_db_service.update_question_template(self.xblock_id,
                                                updated_question_template,
                                                updated_variables,
                                                updated_answer_template)

        # "refresh" XBlock's values
        self.question_template = updated_question_template
        self.variables = updated_variables
        self.answer_template = updated_answer_template

        # The logic below is copied from StudioEditableXBlockMixin.submit_studio_edits,
        # since the parent method cannot be called here directly.
        values = {}  # dict of new field values we are updating
        to_reset = []  # list of field names to delete from this XBlock
        for field_name in self.editable_fields:
            field = self.fields[field_name]
            if field_name in data['values']:
                if isinstance(field, JSONField):
                    values[field_name] = field.from_json(
                        data['values'][field_name])
                else:
                    raise JsonHandlerError(
                        400, "Unsupported field type: {}".format(field_name))
            elif field_name in data['defaults'] and field.is_set_on(self):
                to_reset.append(field_name)
        self.clean_studio_edits(values)
        validation = Validation(self.scope_ids.usage_id)
        # We cannot set the fields on self yet, because even if validation fails, studio is going to save any changes we
        # make. So we create a "fake" object that has all the field values we are about to set.
        preview_data = FutureFields(new_fields_dict=values,
                                    newly_removed_fields=to_reset,
                                    fallback_obj=self)
        self.validate_field_data(validation, preview_data)
        if validation:
            for field_name, value in values.iteritems():
                setattr(self, field_name, value)
            for field_name in to_reset:
                self.fields[field_name].delete_from(self)
            return {'result': 'success'}
        else:
            raise JsonHandlerError(400, validation.to_json())

    @property
    def point_string(self):
        if self.show_points_earned:
            score = sub_api.get_score(self.student_item_key)
            if score is not None:
                return str(score['points_earned']) + ' / ' + str(
                    score['points_possible']) + ' point(s)'

        return str(self.max_points) + ' point(s) possible'

    @property
    def attempt_number_string(self):
        if self.show_submission_times:
            return "You have submitted " + str(
                self.attempt_number) + "/" + str(
                    self.max_attempts) + " time(s)"

        return ""

    @XBlock.json_handler
    def show_answer_handler(self, data, suffix=''):
        """
        AJAX handler for "Show/Hide Answer" button
        """

        self.deserialize_data_from_context(data)

        generated_answer = qgb_question_service.generate_answer(
            self.generated_variables, self.answer_template)

        return {'generated_answer': generated_answer}

    # TO-DO: change this to create the scenarios you'd like to see in the
    # workbench while developing your XBlock.
    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("QuestionGeneratorXBlock", """<question_generator_block/>
             """),
            ("Multiple QuestionGeneratorXBlock", """<vertical_demo>
                <question_generator_block/>
                <question_generator_block/>
                <question_generator_block/>
                </vertical_demo>
             """),
        ]
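
The fe_submit_studio_edits handler above validates the pending field values against a FutureFields preview object before anything is written to the XBlock. The snippet below is only a minimal sketch of that idea, assuming a simplified stand-in for xblock-utils' FutureFields; the PreviewFields and _FakeBlock names are purely illustrative.

# Sketch only: simplified stand-in for xblock-utils' FutureFields, used to
# validate values we are about to set without mutating the real XBlock yet.
class PreviewFields(object):
    """Resolve attributes from the pending new values first, then the real object."""

    def __init__(self, new_fields_dict, newly_removed_fields, fallback_obj):
        self._new = dict(new_fields_dict)
        self._removed = set(newly_removed_fields)
        self._fallback = fallback_obj

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails, i.e. for field names.
        if name in self._new:
            return self._new[name]
        if name in self._removed:
            raise AttributeError(name)  # a reset field behaves as unset
        return getattr(self._fallback, name)


class _FakeBlock(object):
    """Illustrative fallback object standing in for the XBlock being edited."""
    question_template = 'What is [x] + [y]?'
    answer_template = '[x] + [y]'


preview = PreviewFields({'question_template': 'Compute [a] * [b]'}, [], _FakeBlock())
assert preview.question_template == 'Compute [a] * [b]'  # pending value wins
assert preview.answer_template == '[x] + [y]'            # falls back to the block
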
Example #24
0
class OpenAssessmentBlock(MessageMixin, SubmissionMixin, PeerAssessmentMixin,
                          SelfAssessmentMixin, StaffAssessmentMixin,
                          StudioMixin, GradeMixin, LeaderboardMixin,
                          StaffAreaMixin, WorkflowMixin, StudentTrainingMixin,
                          LmsCompatibilityMixin, CourseItemsListingMixin,
                          XBlock):
    """Displays a prompt and provides an area where students can compose a response."""

    public_dir = 'static'

    submission_start = String(
        default=DEFAULT_START,
        scope=Scope.settings,
        help="ISO-8601 formatted string representing the submission start date."
    )

    submission_due = String(
        default=DEFAULT_DUE,
        scope=Scope.settings,
        help="ISO-8601 formatted string representing the submission due date.")

    text_response_raw = String(
        help=
        "Specify whether learners must include a text based response to this problem's prompt.",
        default="required",
        scope=Scope.settings)

    file_upload_response_raw = String(
        help=
        "Specify whether learners are able to upload files as a part of their response.",
        default=None,
        scope=Scope.settings)

    allow_file_upload = Boolean(
        default=False,
        scope=Scope.content,
        help="Do not use. For backwards compatibility only.")

    file_upload_type_raw = String(
        default=None,
        scope=Scope.content,
        help=
        "File upload to be included with submission (can be 'image', 'pdf-and-image', or 'custom')."
    )

    white_listed_file_types = List(
        default=[],
        scope=Scope.content,
        help="Custom list of file types allowed with submission.")

    allow_latex = Boolean(default=False,
                          scope=Scope.settings,
                          help="Latex rendering allowed with submission.")

    title = String(default="Open Response Assessment",
                   scope=Scope.content,
                   help="A title to display to a student (plain text).")

    leaderboard_show = Integer(
        default=0,
        scope=Scope.content,
        help="The number of leaderboard results to display (0 if none)")

    prompt = String(default=DEFAULT_PROMPT,
                    scope=Scope.content,
                    help="The prompts to display to a student.")

    prompts_type = String(default='text',
                          scope=Scope.content,
                          help="The type of prompt. html or text")

    rubric_criteria = List(
        default=DEFAULT_RUBRIC_CRITERIA,
        scope=Scope.content,
        help="The different parts of grading for students giving feedback.")

    rubric_feedback_prompt = String(
        default=DEFAULT_RUBRIC_FEEDBACK_PROMPT,
        scope=Scope.content,
        help="The rubric feedback prompt displayed to the student")

    rubric_feedback_default_text = String(
        default=DEFAULT_RUBRIC_FEEDBACK_TEXT,
        scope=Scope.content,
        help="The default rubric feedback text displayed to the student")

    rubric_assessments = List(
        default=DEFAULT_ASSESSMENT_MODULES,
        scope=Scope.content,
        help=
        "The requested set of assessments and the order in which to apply them."
    )

    course_id = String(
        default=u"TestCourse",
        scope=Scope.content,
        help=
        "The course_id associated with this prompt (until we can get it from runtime)."
    )

    submission_uuid = String(
        default=None,
        scope=Scope.user_state,
        help="The student's submission that others will be assessing.")

    has_saved = Boolean(
        default=False,
        scope=Scope.user_state,
        help="Indicates whether the user has saved a response.")

    saved_response = String(
        default=u"",
        scope=Scope.user_state,
        help="Saved response submission for the current user.")

    saved_files_descriptions = String(
        default=u"",
        scope=Scope.user_state,
        help="Saved descriptions for each uploaded file.")

    no_peers = Boolean(
        default=False,
        scope=Scope.user_state,
        help="Indicates whether or not there are peers to grade.")

    @property
    def course_id(self):
        return text_type(self.xmodule_runtime.course_id)

    @property
    def text_response(self):
        """
        Backward compatibility for existing blocks that were created without text_response
        or file_upload_response fields. These blocks will be treated as required text.
        """
        if not self.file_upload_response and not self.text_response_raw:
            return 'required'
        else:
            return self.text_response_raw

    @text_response.setter
    def text_response(self, value):
        """
        Setter for text_response_raw
        """
        self.text_response_raw = value if value else None

    @property
    def file_upload_response(self):
        """
        Backward compatibility for existing blocks that were created without the
        'text_response' and 'file_upload_response_raw' fields.
        """
        if not self.file_upload_response_raw and (self.file_upload_type_raw
                                                  is not None
                                                  or self.allow_file_upload):
            return 'optional'
        else:
            return self.file_upload_response_raw

    @file_upload_response.setter
    def file_upload_response(self, value):
        """
        Setter for file_upload_response_raw
        """
        self.file_upload_response_raw = value if value else None

    @property
    def file_upload_type(self):
        """
        Backward compatibility for existing blocks created before the change from allow_file_upload to file_upload_type_raw.

        This property will use new file_upload_type_raw field when available, otherwise will fall back to
        allow_file_upload field for old blocks.
        """
        if self.file_upload_type_raw is not None:
            return self.file_upload_type_raw
        if self.allow_file_upload:
            return 'image'
        else:
            return None

    @file_upload_type.setter
    def file_upload_type(self, value):
        """
        Setter for file_upload_type_raw
        """
        self.file_upload_type_raw = value

    @property
    def white_listed_file_types_string(self):
        """
        Join the white listed file types into comma delimited string
        """
        if self.white_listed_file_types:
            return ','.join(self.white_listed_file_types)
        else:
            return ''

    @white_listed_file_types_string.setter
    def white_listed_file_types_string(self, value):
        """
        Convert comma delimited white list string into list with some clean up
        """
        self.white_listed_file_types = [
            file_type.strip().strip('.').lower()
            for file_type in value.split(',')
        ] if value else None

    def get_anonymous_user_id(self, username, course_id):
        """
        Get the anonymous user id from Xblock user service.

        Args:
            username(str): user's name entered by staff to get info.
            course_id(str): course id.

        Returns:
            A unique id for (user, course) pair
        """
        return self.runtime.service(self, 'user').get_anonymous_user_id(
            username, course_id)

    def get_student_item_dict(self, anonymous_user_id=None):
        """Create a student_item_dict from our surrounding context.

        See also: submissions.api for details.

        Args:
            anonymous_user_id(str): A unique anonymous_user_id for (user, course) pair.
        Returns:
            (dict): The student item associated with this XBlock instance. This
                includes the student id, item id, and course id.
        """

        item_id = text_type(self.scope_ids.usage_id)

        # This is not the real way course_ids should work, but this is a
        # temporary expediency for LMS integration
        if hasattr(self, "xmodule_runtime"):
            course_id = self.course_id  # pylint:disable=E1101
            if anonymous_user_id:
                student_id = anonymous_user_id
            else:
                student_id = self.xmodule_runtime.anonymous_student_id  # pylint:disable=E1101
        else:
            course_id = "edX/Enchantment_101/April_1"
            if self.scope_ids.user_id is None:
                student_id = None
            else:
                student_id = text_type(self.scope_ids.user_id)

        student_item_dict = dict(student_id=student_id,
                                 item_id=item_id,
                                 course_id=course_id,
                                 item_type='openassessment')
        return student_item_dict

    def add_javascript_files(self, fragment, item):
        """
        Add all the JavaScript files from a directory to the specified fragment
        """
        if pkg_resources.resource_isdir(__name__, item):
            for child_item in pkg_resources.resource_listdir(__name__, item):
                path = os.path.join(item, child_item)
                if not pkg_resources.resource_isdir(__name__, path):
                    fragment.add_javascript_url(
                        self.runtime.local_resource_url(self, path))
        else:
            fragment.add_javascript_url(
                self.runtime.local_resource_url(self, item))

    def student_view(self, context=None):
        """The main view of OpenAssessmentBlock, displayed when viewing courses.

        The main view which displays the general layout for Open Ended
        Assessment Questions. The contents of the XBlock are determined
        dynamically based on the assessment workflow configured by the author.

        Args:
            context: Not used for this view.

        Returns:
            (Fragment): The HTML Fragment for this XBlock, which determines the
            general frame of the Open Ended Assessment Question.
        """
        # On page load, update the workflow status.
        # We need to do this here because peers may have graded us, in which
        # case we may have a score available.

        try:
            self.update_workflow_status()
        except AssessmentWorkflowError:
            # Log the exception, but continue loading the page
            logger.exception(
                'An error occurred while updating the workflow on page load.')

        ui_models = self._create_ui_models()
        # All data we intend to pass to the front end.
        context_dict = {
            "title": self.title,
            "prompts": self.prompts,
            "prompts_type": self.prompts_type,
            "rubric_assessments": ui_models,
            "show_staff_area": self.is_course_staff
            and not self.in_studio_preview,
        }
        template = get_template("openassessmentblock/oa_base.html")

        return self._create_fragment(template,
                                     context_dict,
                                     initialize_js_func='OpenAssessmentBlock')

    def ora_blocks_listing_view(self, context=None):
        """This view is used in the Open Response Assessment tab in the LMS Instructor Dashboard
        to display all available course ORA blocks.

        Args:
            context: contains two items:
                "ora_items" - all course items with names and parents, example:
                    [{"parent_name": "Vertical name",
                      "name": "ORA Display Name",
                      "url_grade_available_responses": "/grade_available_responses_view",
                      "staff_assessment": false,
                      "parent_id": "vertical_block_id",
                      "url_base": "/student_view",
                      "id": "openassessment_block_id"
                     }, ...]
                "ora_item_view_enabled" - enabled LMS API endpoint to serve XBlock view or not

        Returns:
            (Fragment): The HTML Fragment for this XBlock.
        """
        ora_items = context.get('ora_items', []) if context else []
        ora_item_view_enabled = context.get('ora_item_view_enabled',
                                            False) if context else False
        context_dict = {
            "ora_items": json.dumps(ora_items),
            "ora_item_view_enabled": ora_item_view_enabled
        }

        template = get_template(
            'openassessmentblock/instructor_dashboard/oa_listing.html')

        min_postfix = '.min' if settings.DEBUG else ''

        return self._create_fragment(
            template,
            context_dict,
            initialize_js_func='CourseOpenResponsesListingBlock',
            additional_css=[
                "static/css/lib/backgrid/backgrid%s.css" % min_postfix
            ],
            additional_js=[
                "static/js/lib/backgrid/backgrid%s.js" % min_postfix
            ])

    def grade_available_responses_view(self, context=None):  # pylint: disable=unused-argument
        """Grade Available Responses view.

        Auxiliary view which displays the staff grading area
        (used in the Open Response Assessment tab in the Instructor Dashboard of LMS)

        Args:
            context: Not used for this view.

        Returns:
            (Fragment): The HTML Fragment for this XBlock.
        """
        student_item = self.get_student_item_dict()
        staff_assessment_required = "staff-assessment" in self.assessment_steps

        context_dict = {
            "title": self.title,
            'staff_assessment_required': staff_assessment_required
        }

        if staff_assessment_required:
            context_dict.update(
                self.get_staff_assessment_statistics_context(
                    student_item["course_id"], student_item["item_id"]))

        template = get_template(
            'openassessmentblock/instructor_dashboard/oa_grade_available_responses.html'
        )

        return self._create_fragment(template,
                                     context_dict,
                                     initialize_js_func='StaffAssessmentBlock')

    def _create_fragment(self,
                         template,
                         context_dict,
                         initialize_js_func,
                         additional_css=None,
                         additional_js=None):
        """
        Creates a fragment for display.

        """
        fragment = Fragment(template.render(context_dict))

        if additional_css is None:
            additional_css = []
        if additional_js is None:
            additional_js = []

        i18n_service = self.runtime.service(self, 'i18n')
        if hasattr(i18n_service,
                   'get_language_bidi') and i18n_service.get_language_bidi():
            css_url = "static/css/openassessment-rtl.css"
        else:
            css_url = "static/css/openassessment-ltr.css"

        if settings.DEBUG:
            for css in additional_css:
                fragment.add_css_url(self.runtime.local_resource_url(
                    self, css))
            fragment.add_css_url(self.runtime.local_resource_url(
                self, css_url))

            for js in additional_js:
                self.add_javascript_files(fragment, js)
            self.add_javascript_files(fragment, "static/js/src/oa_shared.js")
            self.add_javascript_files(fragment, "static/js/src/oa_server.js")
            self.add_javascript_files(fragment, "static/js/src/lms")
        else:
            # TODO: load CSS and JavaScript as URLs once they can be served by the CDN
            for css in additional_css:
                fragment.add_css(load(css))
            fragment.add_css(load(css_url))

            # minified additional_js should be already included in 'make javascript'
            fragment.add_javascript(
                load("static/js/openassessment-lms.min.js"))
        js_context_dict = {
            "ALLOWED_IMAGE_MIME_TYPES": self.ALLOWED_IMAGE_MIME_TYPES,
            "ALLOWED_FILE_MIME_TYPES": self.ALLOWED_FILE_MIME_TYPES,
            "FILE_EXT_BLACK_LIST": self.FILE_EXT_BLACK_LIST,
            "FILE_TYPE_WHITE_LIST": self.white_listed_file_types,
        }
        fragment.initialize_js(initialize_js_func, js_context_dict)
        return fragment

    @property
    def is_admin(self):
        """
        Check whether the user has global staff permissions.

        Returns:
            bool
        """
        if hasattr(self, 'xmodule_runtime'):
            return getattr(self.xmodule_runtime, 'user_is_admin', False)
        else:
            return False

    @property
    def is_course_staff(self):
        """
        Check whether the user has course staff permissions for this XBlock.

        Returns:
            bool
        """
        if hasattr(self, 'xmodule_runtime'):
            return getattr(self.xmodule_runtime, 'user_is_staff', False)
        else:
            return False

    @property
    def is_beta_tester(self):
        """
        Check whether the user is a beta tester.

        Returns:
            bool
        """
        if hasattr(self, 'xmodule_runtime'):
            return getattr(self.xmodule_runtime, 'user_is_beta_tester', False)
        else:
            return False

    @property
    def in_studio_preview(self):
        """
        Check whether we are in Studio preview mode.

        Returns:
            bool

        """
        # When we're running in Studio Preview mode, the XBlock won't provide us with a user ID.
        # (Note that `self.xmodule_runtime` will still provide an anonymous
        # student ID, so we can't rely on that)
        return self.scope_ids.user_id is None

    def _create_ui_models(self):
        """Combine UI attributes and XBlock configuration into a UI model.

        This method takes all configuration for this XBlock instance and appends
        UI attributes to create a UI Model for rendering all assessment modules.
        This allows a clean separation of static UI attributes from persistent
        XBlock configuration.

        """
        ui_models = [UI_MODELS["submission"]]
        staff_assessment_required = False
        for assessment in self.valid_assessments:
            if assessment["name"] == "staff-assessment":
                if not assessment["required"]:
                    continue
                else:
                    staff_assessment_required = True
            ui_model = UI_MODELS.get(assessment["name"])
            if ui_model:
                ui_models.append(dict(assessment, **ui_model))

        if not staff_assessment_required and self.staff_assessment_exists(
                self.submission_uuid):
            ui_models.append(UI_MODELS["staff-assessment"])

        ui_models.append(UI_MODELS["grade"])

        if self.leaderboard_show > 0:
            ui_models.append(UI_MODELS["leaderboard"])

        return ui_models

    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench.

        These scenarios are only intended to be used for Workbench XBlock
        Development.

        """
        return [
            ("OpenAssessmentBlock File Upload: Images",
             load('static/xml/file_upload_image_only.xml')),
            ("OpenAssessmentBlock File Upload: PDF and Images",
             load('static/xml/file_upload_pdf_and_image.xml')),
            ("OpenAssessmentBlock File Upload: Custom File Types",
             load('static/xml/file_upload_custom.xml')),
            ("OpenAssessmentBlock File Upload: allow_file_upload compatibility",
             load('static/xml/file_upload_compat.xml')),
            ("OpenAssessmentBlock Unicode", load('static/xml/unicode.xml')),
            ("OpenAssessmentBlock Poverty Rubric",
             load('static/xml/poverty_rubric_example.xml')),
            ("OpenAssessmentBlock Leaderboard",
             load('static/xml/leaderboard.xml')),
            ("OpenAssessmentBlock Leaderboard with Custom File Type",
             load('static/xml/leaderboard_custom.xml')),
            ("OpenAssessmentBlock (Peer Only) Rubric",
             load('static/xml/poverty_peer_only_example.xml')),
            ("OpenAssessmentBlock (Self Only) Rubric",
             load('static/xml/poverty_self_only_example.xml')),
            ("OpenAssessmentBlock Censorship Rubric",
             load('static/xml/censorship_rubric_example.xml')),
            ("OpenAssessmentBlock Promptless Rubric",
             load('static/xml/promptless_rubric_example.xml')),
        ]

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        """
        config = parse_from_xml(node)
        block = runtime.construct_xblock_from_class(cls, keys)

        xblock_validator = validator(block, block._, strict_post_release=False)
        xblock_validator(create_rubric_dict(config['prompts'],
                                            config['rubric_criteria']),
                         config['rubric_assessments'],
                         submission_start=config['submission_start'],
                         submission_due=config['submission_due'],
                         leaderboard_show=config['leaderboard_show'])

        block.rubric_criteria = config['rubric_criteria']
        block.rubric_feedback_prompt = config['rubric_feedback_prompt']
        block.rubric_feedback_default_text = config[
            'rubric_feedback_default_text']
        block.rubric_assessments = config['rubric_assessments']
        block.submission_start = config['submission_start']
        block.submission_due = config['submission_due']
        block.title = config['title']
        block.prompts = config['prompts']
        block.prompts_type = config['prompts_type']
        block.text_response = config['text_response']
        block.file_upload_response = config['file_upload_response']
        block.allow_file_upload = config['allow_file_upload']
        block.file_upload_type = config['file_upload_type']
        block.white_listed_file_types_string = config[
            'white_listed_file_types']
        block.allow_latex = config['allow_latex']
        block.leaderboard_show = config['leaderboard_show']
        block.group_access = config['group_access']

        return block

    @property
    def _(self):
        i18nService = self.runtime.service(self, 'i18n')
        return i18nService.ugettext

    @property
    def prompts(self):
        """
        Return the prompts.

        Initially a block had a single prompt which was saved as a simple
        string in the prompt field. Now prompts are saved as a serialized
        list of dicts in the same field. If prompt field contains valid json,
        parse and return it. Otherwise, assume it is a simple string prompt
        and return it in a list of dict.

        Returns:
            list of dict
        """
        return create_prompts_list(self.prompt)

    @prompts.setter
    def prompts(self, value):
        """
        Serialize the prompts and save to prompt field.

        Args:
            value (list of dict): The prompts to set.
        """

        if value is None:
            self.prompt = None
        elif len(value) == 1:
            # For backwards compatibility. To be removed after all code
            # is migrated to use prompts property instead of prompt field.
            self.prompt = value[0]['description']
        else:
            self.prompt = json.dumps(value)

    @property
    def valid_assessments(self):
        """
        Return a list of assessment dictionaries that we recognize.
        This allows us to gracefully handle situations in which unrecognized
        assessment types are stored in the XBlock field (e.g. because
        we roll back code after releasing a feature).

        Returns:
            list

        """
        _valid_assessments = [
            asmnt for asmnt in self.rubric_assessments
            if asmnt.get('name') in VALID_ASSESSMENT_TYPES
        ]
        return update_assessments_format(copy.deepcopy(_valid_assessments))

    @property
    def assessment_steps(self):
        return [asmnt['name'] for asmnt in self.valid_assessments]

    @lazy
    def rubric_criteria_with_labels(self):
        """
        Backwards compatibility: We used to treat "name" as both a user-facing label
        and a unique identifier for criteria and options.
        Now we treat "name" as a unique identifier, and we've added an additional "label"
        field that we display to the user.
        If criteria/options in the problem definition do NOT have a "label" field
        (because they were created before this change),
        we create a new label that has the same value as "name".

        The result of this call is cached, so it should NOT be used in a runtime
        that can modify the XBlock settings (in the LMS, settings are read-only).

        Returns:
            list of criteria dictionaries

        """
        criteria = copy.deepcopy(self.rubric_criteria)
        for criterion in criteria:
            if 'label' not in criterion:
                criterion['label'] = criterion['name']
            for option in criterion['options']:
                if 'label' not in option:
                    option['label'] = option['name']
        return criteria

    def render_assessment(self, path, context_dict=None):
        """Render an Assessment Module's HTML

        Given the name of an assessment module, find it in the list of
        configured modules, and ask for its rendered HTML.

        Args:
            path (str): The path to the template used to render this HTML
                section.
            context_dict (dict): A dictionary of context variables used to
                populate this HTML section.

        Returns:
            (Response): A Response Object with the generated HTML fragment. This
                is intended for AJAX calls to load dynamically into a larger
                document.
        """
        if not context_dict:
            context_dict = {}

        template = get_template(path)
        return Response(template.render(context_dict),
                        content_type='application/html',
                        charset='UTF-8')

    def add_xml_to_node(self, node):
        """
        Serialize the XBlock to XML for exporting.
        """
        serialize_content_to_xml(self, node)

    def render_error(self, error_msg):
        """
        Render an error message.

        Args:
            error_msg (unicode): The error message to display.

        Returns:
            Response: A response object with an HTML body.
        """
        context = {'error_msg': error_msg}
        template = get_template('openassessmentblock/oa_error.html')
        return Response(template.render(context),
                        content_type='application/html',
                        charset='UTF-8')

    def is_closed(self, step=None, course_staff=None):
        """
        Checks if the question is closed.

        Determines if the start date is in the future or the end date has
            passed.  Optionally limited to a particular step in the workflow.

        Start/due dates do NOT apply to course staff, since course staff may need to get to
        the peer grading step AFTER the submission deadline has passed.
        This may not be necessary when we implement a grading interface specifically for course staff.

        Keyword Arguments:
            step (str): The step in the workflow to check.  Options are:
                None: check whether the problem as a whole is open.
                "submission": check whether the submission section is open.
                "peer-assessment": check whether the peer-assessment section is open.
                "self-assessment": check whether the self-assessment section is open.

            course_staff (bool): Whether to treat the user as course staff (disable start/due dates).
                If not specified, default to the current user's status.

        Returns:
            tuple of the form (is_closed, reason, start_date, due_date), where
                is_closed (bool): indicates whether the step is closed.
                reason (str or None): specifies the reason the step is closed ("start" or "due")
                start_date (datetime): is the start date of the step/problem.
                due_date (datetime): is the due date of the step/problem.

        Examples:
            >>> is_closed()
            False, None, datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
            datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
            >>> is_closed(step="submission")
            True, "due", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
            datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
            >>> is_closed(step="self-assessment")
            True, "start", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
            datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)

        """
        submission_range = (self.submission_start, self.submission_due)
        assessment_ranges = [(asmnt.get('start'), asmnt.get('due'))
                             for asmnt in self.valid_assessments]

        # Resolve unspecified dates and date strings to datetimes
        start, due, date_ranges = resolve_dates(
            self.start, self.due, [submission_range] + assessment_ranges,
            self._)

        open_range = (start, due)
        assessment_steps = self.assessment_steps
        if step == 'submission':
            open_range = date_ranges[0]
        elif step in assessment_steps:
            step_index = assessment_steps.index(step)
            open_range = date_ranges[1 + step_index]

        # Course staff always have access to the problem
        if course_staff is None:
            course_staff = self.is_course_staff
        if course_staff:
            return False, None, DISTANT_PAST, DISTANT_FUTURE

        if self.is_beta_tester:
            beta_start = self._adjust_start_date_for_beta_testers(
                open_range[0])
            open_range = (beta_start, open_range[1])

        # Check if we are in the open date range
        now = dt.datetime.utcnow().replace(tzinfo=pytz.utc)

        if now < open_range[0]:
            return True, "start", open_range[0], open_range[1]
        elif now >= open_range[1]:
            return True, "due", open_range[0], open_range[1]
        else:
            return False, None, open_range[0], open_range[1]

    def get_waiting_details(self, status_details):
        """
        Returns waiting status (boolean value) based on the given status_details.

        Args:
            status_details (dict): A dictionary containing the details of each
                assessment module status. This will contain keys such as
                "peer", "ai", and "staff", referring to dictionaries, which in
                turn will have the key "graded". If this key has a value set,
                these assessment modules have been graded.

        Returns:
            True if waiting for a grade from peer, ai, or staff assessment, else False.

        Examples:
            >>> now = dt.datetime.utcnow().replace(tzinfo=pytz.utc)
            >>> status_details = {
            >>>     'peer': {
            >>>         'completed': None,
            >>>         'graded': now
            >>>     },
            >>>     'ai': {
            >>>         'completed': now,
            >>>         'graded': None
            >>>     }
            >>> }
            >>> self.get_waiting_details(status_details)
            True
        """
        steps = [
            "peer", "ai", "staff"
        ]  # These are the steps that can be submitter-complete, but lack a grade
        for step in steps:
            if step in status_details and not status_details[step]["graded"]:
                return True
        return False

    def is_released(self, step=None):
        """
        Check if a question has been released.

        Keyword Arguments:
            step (str): The step in the workflow to check.
                None: check whether the problem as a whole is open.
                "submission": check whether the submission section is open.
                "peer-assessment": check whether the peer-assessment section is open.
                "self-assessment": check whether the self-assessment section is open.

        Returns:
            bool
        """
        # By default, assume that we're published, in case the runtime doesn't support publish date.
        if hasattr(self.runtime, 'modulestore'):
            is_published = self.runtime.modulestore.has_published_version(self)
        else:
            is_published = True
        is_closed, reason, __, __ = self.is_closed(step=step)
        is_released = is_published and (not is_closed or reason == 'due')
        if self.start:
            is_released = is_released and dt.datetime.now(
                pytz.UTC) > parse_date_value(self.start, self._)
        return is_released

    def get_assessment_module(self, mixin_name):
        """
        Get a configured assessment module by name.

        Args:
            mixin_name (str): The name of the mixin (e.g. "self-assessment" or "peer-assessment")

        Returns:
            dict

        Example:
            >>> self.get_assessment_module('peer-assessment')
            {
                "name": "peer-assessment",
                "start": None,
                "due": None,
                "must_grade": 5,
                "must_be_graded_by": 3,
            }
        """
        for assessment in self.valid_assessments:
            if assessment["name"] == mixin_name:
                return assessment

    def publish_assessment_event(self, event_name, assessment, **kwargs):
        """
        Emit an analytics event for the peer assessment.

        Args:
            event_name (str): An identifier for this event type.
            assessment (dict): The serialized assessment model.

        Returns:
            None

        """
        parts_list = []
        for part in assessment["parts"]:
            # Some assessment parts do not include point values,
            # only written feedback.  In this case, the assessment
            # part won't have an associated option.
            option_dict = None
            if part["option"] is not None:
                option_dict = {
                    "name": part["option"]["name"],
                    "points": part["option"]["points"],
                }

            # All assessment parts are associated with criteria
            criterion_dict = {
                "name": part["criterion"]["name"],
                "points_possible": part["criterion"]["points_possible"]
            }

            parts_list.append({
                "option": option_dict,
                "criterion": criterion_dict,
                "feedback": part["feedback"]
            })

        event_data = {
            "feedback": assessment["feedback"],
            "rubric": {
                "content_hash": assessment["rubric"]["content_hash"],
            },
            "scorer_id": assessment["scorer_id"],
            "score_type": assessment["score_type"],
            "scored_at": assessment["scored_at"],
            "submission_uuid": assessment["submission_uuid"],
            "parts": parts_list
        }

        for key in kwargs:
            event_data[key] = kwargs[key]

        self.runtime.publish(self, event_name, event_data)

    @XBlock.json_handler
    def publish_event(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Publish the given data to an event.

        Expects key 'event_name' to be present in the data dictionary.
        """

        try:
            event_name = data['event_name']
        except KeyError:
            logger.exception(
                "Could not find the name of the event to be triggered.")
            return {'success': False}

        # Remove the name so we don't publish as part of the data.
        del data['event_name']

        self.runtime.publish(self, event_name, data)
        return {'success': True}

    def get_username(self, anonymous_user_id):
        """
        Return the username of the user associated with anonymous_user_id
        Args:
            anonymous_user_id (str): the anonymous user id of the user

        Returns: the username if it can be identified. If the XBlock service that converts
            the anonymous id to a real user fails, returns None and logs the error.

        """
        if hasattr(self, "xmodule_runtime"):
            user = self.xmodule_runtime.get_real_user(anonymous_user_id)
            if user:
                return user.username
            else:
                logger.exception(
                    "XBlock service could not find user for anonymous_user_id '{}'"
                    .format(anonymous_user_id))
                return None

    def _adjust_start_date_for_beta_testers(self, start):
        if hasattr(self, "xmodule_runtime"):
            days_early_for_beta = getattr(self.xmodule_runtime,
                                          'days_early_for_beta', 0)
            if days_early_for_beta is not None:
                delta = dt.timedelta(days_early_for_beta)
                effective = start - delta
                return effective

        return start

    def get_xblock_id(self):
        """
        Returns the xblock id
        """
        return text_type(self.scope_ids.usage_id)
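
The prompts property above delegates to create_prompts_list so that either the legacy single-string prompt or the newer JSON-serialized list of dicts is accepted. The helper below is only a sketch of that parsing behavior as described in the property's docstring, not the actual openassessment implementation; its name is illustrative.

import json


def create_prompts_list_sketch(prompt):
    """Return a list of {'description': ...} dicts for either prompt format (sketch only)."""
    try:
        parsed = json.loads(prompt)
    except (TypeError, ValueError):
        parsed = None
    if isinstance(parsed, list):
        return parsed  # already the serialized list-of-dicts format
    return [{'description': prompt}]  # legacy plain-string prompt


assert create_prompts_list_sketch('Describe a poem.') == [{'description': 'Describe a poem.'}]
assert create_prompts_list_sketch('[{"description": "Part 1"}]') == [{'description': 'Part 1'}]
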
Example #25
0
class ScormXBlock(XBlock):
    has_score = True
    has_author_view = True

    display_name = String(display_name=_("Display Name"),
                          help=_("Display name for this module"),
                          default="SCORM",
                          scope=Scope.settings)
    description = String(
        display_name=_("Description"),
        help=
        _("Brief description of the SCORM modules will be displayed above the player. Can contain HTML."
          ),
        default="",
        scope=Scope.settings)
    scorm_file = String(display_name=_("Upload scorm file (.zip)"),
                        help=_('Upload a new SCORM package.'),
                        scope=Scope.settings)
    scorm_player = String(
        values=[{
            "value": key,
            "display_name": DEFINED_PLAYERS[key]['name']
        } for key in DEFINED_PLAYERS.keys()] + [
            SCORM_PKG_INTERNAL,
        ],
        display_name=_("SCORM player"),
        help=
        _("SCORM player configured in Django settings, or index.html file contained in SCORM package"
          ),
        scope=Scope.settings)
    # this stores latest raw SCORM API data in JSON string
    raw_scorm_status = String(scope=Scope.user_state, default='{}')
    scorm_initialized = Boolean(scope=Scope.user_state, default=False)
    lesson_status = String(scope=Scope.user_state, default='not attempted')
    lesson_score = Float(scope=Scope.user_state, default=0)
    weight = Integer(
        default=1,
        help=
        _('SCORM block\'s problem weight in the course, in points.  If not graded, set to 0'
          ),
        scope=Scope.settings)
    display_type = String(
        display_name=_("Display Type"),
        values=["iframe", "popup"],
        default="iframe",
        help=
        _("Open in a new popup window, or an iframe.  This setting may be overridden by player-specific configuration."
          ),
        scope=Scope.settings)
    display_width = Integer(display_name=_("Display Width (px)"),
                            help=_('Width of iframe or popup window'),
                            default=820,
                            scope=Scope.settings)
    display_height = Integer(display_name=_("Display Height (px)"),
                             help=_('Height of iframe or popup window'),
                             default=450,
                             scope=Scope.settings)
    encoding = String(
        display_name=_("SCORM Package text encoding"),
        default='cp850',
        help=
        _("Character set used in SCORM package.  Defaults to cp850 (or IBM850), for Latin-1: Western European languages)"
          ),
        values=[{
            "value": AVAIL_ENCODINGS[key],
            "display_name": key
        } for key in sorted(AVAIL_ENCODINGS.keys())],
        scope=Scope.settings)
    player_configuration = String(
        display_name=_("Player Configuration"),
        default='',
        help=
        _("JSON object string with overrides to be passed to selected SCORM player.  These will be exposed as data attributes on the host iframe and sent in a window.postMessage to the iframe's content window. Attributes can be any.  'Internal player' will always check this field for an 'initial_html' attribute to override index.html as the initial page."
          ),
        scope=Scope.settings)

    @property
    def student_id(self):
        if hasattr(self, "scope_ids"):
            return self.scope_ids.user_id
        else:
            return None

    @property
    def student_name(self):
        if hasattr(self, "xmodule_runtime"):
            user = self.xmodule_runtime._services['user'].get_current_user()
            try:
                return user.display_name
            except AttributeError:
                return user.full_name
        else:
            return None

    @property
    def course_id(self):
        if hasattr(self, "xmodule_runtime"):
            return self._serialize_opaque_key(self.xmodule_runtime.course_id)
        else:
            return None

    def _reverse_student_name(self, name):
        parts = name.split(' ', 1)
        parts.reverse()
        return ', '.join(parts)

    def _serialize_opaque_key(self, key):
        if hasattr(key, 'to_deprecated_string'):
            return key.to_deprecated_string()
        else:
            return unicode(key)

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def student_view(self, context=None, authoring=False):
        scheme = 'https' if settings.HTTPS == 'on' else 'http'
        lms_base = settings.ENV_TOKENS.get('LMS_BASE')
        if isinstance(context, QueryDict):
            context = context.dict()

        if microsite.is_request_in_microsite():
            subdomain = microsite.get_value(
                "domain_prefix",
                None) or microsite.get_value('microsite_config_key')
            lms_base = "{}.{}".format(subdomain, lms_base)
        scorm_player_url = ""

        course_directory = self.scorm_file
        if self.scorm_player == 'SCORM_PKG_INTERNAL':
            # TODO: support initial filename other than index.html for internal players
            #scorm_player_url = '{}://{}{}'.format(scheme, lms_base, self.scorm_file)
            scorm_player_url = '{}{}'.format(self.scorm_file, '/index.html')
        elif self.scorm_player:
            player_config = DEFINED_PLAYERS[self.scorm_player]
            player = player_config['location']
            if '://' in player:
                scorm_player_url = player
            else:
                scorm_player_url = '{}://{}{}'.format(scheme, lms_base, player)
            course_directory = '{}://{}{}'.format(
                scheme, lms_base,
                self.runtime.handler_url(self, "proxy_content"))

        html = self.resource_string("static/html/scormxblock.html")

        # don't call handlers if student_view is not called from within LMS
        # (not really a student)
        if not authoring:
            get_url = '{}://{}{}'.format(
                scheme, lms_base,
                self.runtime.handler_url(self, "get_raw_scorm_status"))
            set_url = '{}://{}{}'.format(
                scheme, lms_base,
                self.runtime.handler_url(self, "set_raw_scorm_status"))
        # PreviewModuleSystem (runtime Mixin from Studio) won't have a hostname
        else:
            # we don't want to get/set SCORM status from preview
            get_url = set_url = '#'

        # if display type is popup, don't use the full window width for the host iframe
        iframe_width = DEFAULT_IFRAME_WIDTH if self.display_type == 'popup' else self.display_width
        iframe_height = DEFAULT_IFRAME_HEIGHT if self.display_type == 'popup' else self.display_height

        try:
            player_config = json.loads(self.player_configuration)
        except ValueError:
            player_config = {}

        frag = Fragment()
        frag.add_content(
            MakoTemplate(text=html.format(
                self=self,
                scorm_player_url=scorm_player_url,
                get_url=get_url,
                set_url=set_url,
                iframe_width=iframe_width,
                iframe_height=iframe_height,
                player_config=player_config,
                scorm_file=course_directory)).render_unicode())

        frag.add_css(self.resource_string("static/css/scormxblock.css"))
        context['block_id'] = self.url_name
        js = self.resource_string("static/js/src/scormxblock.js")
        jsfrag = MakoTemplate(js).render_unicode(**context)
        frag.add_javascript(jsfrag)

        # TODO: this will only work to display staff debug info if 'scormxblock' is one of the
        # categories of blocks that are specified in lms/templates/staff_problem_info.html so this will
        # for now have to be overridden in theme or directly in edx-platform
        # TODO: is there another way to approach this?  key's location.category isn't mutable to spoof 'problem',
        # like setting the name in the entry point to 'problem'.  Doesn't seem like a good idea.  Better to
        # have 'staff debuggable' categories configurable in settings or have an XBlock declare itself staff debuggable
        if SCORM_DISPLAY_STAFF_DEBUG_INFO and not authoring:  # don't show for author preview
            from courseware.access import has_access
            from courseware.courses import get_course_by_id

            course = get_course_by_id(self.xmodule_runtime.course_id)
            dj_user = self.xmodule_runtime._services['user']._django_user
            has_instructor_access = bool(
                has_access(dj_user, 'instructor', course))
            if has_instructor_access:
                # Disable the staff debug panel only when the feature flag is off.
                disable_staff_debug_info = not settings.FEATURES.get(
                    'DISPLAY_DEBUG_INFO_TO_STAFF', True)
                block = self
                view = 'student_view'
                frag = add_staff_markup(dj_user, has_instructor_access,
                                        disable_staff_debug_info, block, view,
                                        frag, context)

        frag.initialize_js('ScormXBlock_{0}'.format(context['block_id']))
        return frag

    def author_view(self, context=None):
        return self.student_view(context, authoring=True)

    def studio_view(self, context=None):
        html = self.resource_string("static/html/studio.html")
        frag = Fragment()
        context = {'block': self}
        frag.add_content(MakoTemplate(text=html).render_unicode(**context))
        frag.add_css(self.resource_string("static/css/scormxblock.css"))
        frag.add_javascript(self.resource_string("static/js/src/studio.js"))
        frag.add_javascript_url(
            self.runtime.local_resource_url(self,
                                            'public/jquery.fileupload.js'))
        frag.initialize_js('ScormStudioXBlock')
        return frag

    @XBlock.handler
    def upload_status(self, request, suffix=''):
        """
        Status of the SCORM package upload to storage
        """
        upload_percent = ScormPackageUploader.get_upload_percentage(
            self.location.block_id)

        logger.info('Upload percentage is: {}'.format(upload_percent))

        return Response(json.dumps({"progress": upload_percent}))

    @XBlock.handler
    def file_upload_handler(self, request, suffix=''):
        """
        Handler for scorm file upload
        """
        response = {}
        scorm_uploader = ScormPackageUploader(
            request=request, xblock=self, scorm_storage_location=SCORM_STORAGE)

        try:
            state, data = scorm_uploader.upload()
        except Exception as e:
            logger.error('Scorm package upload error: {}'.format(e.message))
            ScormPackageUploader.clear_percentage_cache(self.location.block_id)
            return Response(
                json.dumps({
                    'status': 'error',
                    'message': e.message
                }))

        if state == UPLOAD_STATE.PROGRESS:
            response = {"files": [{"size": data}]}
        elif state == UPLOAD_STATE.COMPLETE and data:
            ScormPackageUploader.clear_percentage_cache(self.location.block_id)
            self.scorm_file = data
            response = {'status': 'OK'}

        return Response(json.dumps(response))

    @XBlock.handler
    def studio_submit(self, request, suffix=''):
        self.display_name = request.params['display_name']
        self.description = request.params['description']
        self.weight = request.params['weight']
        self.display_width = request.params['display_width']
        self.display_height = request.params['display_height']
        self.display_type = request.params['display_type']
        self.scorm_player = request.params['scorm_player']
        self.encoding = request.params['encoding']

        if request.params['player_configuration']:
            try:
                # Validate the submitted configuration before storing it.
                json.loads(request.params['player_configuration'])
                self.player_configuration = request.params[
                    'player_configuration']
            except ValueError as e:
                return Response(json.dumps({
                    'result': 'failure',
                    'error': 'Invalid JSON in Player Configuration: {}'.format(e)
                }),
                                content_type='application/json')

        return Response(json.dumps({'result': 'success'}),
                        content_type='application/json')
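The studio_submit handler above only stores player_configuration after json.loads accepts it. A minimal standalone sketch of that validate-then-store pattern; the helper name is illustrative and not part of the XBlock API:

import json


def validate_player_configuration(raw_value):
    """Return (ok, error_message) after checking that raw_value is valid JSON."""
    if not raw_value:
        # An empty configuration is allowed and simply left unset.
        return True, None
    try:
        json.loads(raw_value)
    except ValueError as exc:
        return False, 'Invalid JSON in Player Configuration: {}'.format(exc)
    return True, None


print(validate_player_configuration('{"autoplay": true}'))  # (True, None)
print(validate_player_configuration('{autoplay: true}'))    # (False, 'Invalid JSON ...')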
class SplitTestFields(object):
    """Fields needed for split test module"""
    has_children = True

    # All available user partitions (with value and display name). This is updated each time
    # editable_metadata_fields is called.
    user_partition_values = []
    # Default value used for user_partition_id
    no_partition_selected = {'display_name': _("Not Selected"), 'value': -1}

    @staticmethod
    def build_partition_values(all_user_partitions, selected_user_partition):
        """
        This helper method builds up the user_partition values that will
        be passed to the Studio editor
        """
        SplitTestFields.user_partition_values = []
        # Add "No selection" value if there is not a valid selected user partition.
        if not selected_user_partition:
            SplitTestFields.user_partition_values.append(
                SplitTestFields.no_partition_selected)
        for user_partition in all_user_partitions:
            SplitTestFields.user_partition_values.append({
                "display_name":
                user_partition.name,
                "value":
                user_partition.id
            })
        return SplitTestFields.user_partition_values

    display_name = String(
        display_name=_("Display Name"),
        help=
        _("This name is used for organizing your course content, but is not shown to students."
          ),
        scope=Scope.settings,
        default=_("Content Experiment"))

    # Specified here so we can see what the value set at the course-level is.
    user_partitions = UserPartitionList(help=_(
        "The list of group configurations for partitioning students in content experiments."
    ),
                                        default=[],
                                        scope=Scope.settings)

    user_partition_id = Integer(
        help=
        _("The configuration defines how users are grouped for this content experiment. Caution: Changing the group configuration of a student-visible experiment will impact the experiment data."
          ),
        scope=Scope.content,
        display_name=_("Group Configuration"),
        default=no_partition_selected["value"],
        # Will be populated before the Studio editor is shown.
        values=lambda: SplitTestFields.user_partition_values
    )

    # group_id is an int
    # child is a serialized UsageId (aka Location).  This child
    # location needs to actually match one of the children of this
    # Block.  (expected invariant that we'll need to test, and handle
    # authoring tools that mess this up)
    group_id_to_child = ReferenceValueDict(help=_(
        "Which child module students in a particular group_id should see"),
                                           scope=Scope.content)
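build_partition_values above rebuilds the class-level user_partition_values list each time the Studio editor needs it, prepending a "Not Selected" entry when no partition is chosen. A minimal sketch of exercising it against the class above, using stand-in partition objects (StubPartition is illustrative, not a platform class):

from collections import namedtuple

# Stand-in for the platform's UserPartition objects; only .id and .name are read.
StubPartition = namedtuple('StubPartition', ['id', 'name'])

partitions = [StubPartition(0, 'Experiment A'), StubPartition(1, 'Experiment B')]

# No partition selected yet, so the "Not Selected" placeholder comes first.
values = SplitTestFields.build_partition_values(partitions, selected_user_partition=None)
# values == [
#     {'display_name': 'Not Selected', 'value': -1},
#     {'display_name': 'Experiment A', 'value': 0},
#     {'display_name': 'Experiment B', 'value': 1},
# ]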
Example #27
0
class CombinedOpenEndedFields(object):
    display_name = String(
        display_name=_("Display Name"),
        help=
        _("This name appears in the horizontal navigation at the top of the page."
          ),
        default=_("Open Response Assessment"),
        scope=Scope.settings)
    current_task_number = Integer(
        help=_("Current task that the student is on."),
        default=0,
        scope=Scope.user_state)
    old_task_states = List(
        help=
        _("A list of lists of state dictionaries for student states that are saved. "
          "This field is only populated if the instructor changes tasks after "
          "the module is created and students have attempted it (for example, if a self assessed problem is "
          "changed to self and peer assessed)."),
        scope=Scope.user_state,
    )
    task_states = List(
        help=_("List of state dictionaries of each task within this module."),
        scope=Scope.user_state)
    state = String(
        help=_("Which step within the current task that the student is on."),
        default="initial",
        scope=Scope.user_state)
    graded = Boolean(
        display_name=_("Graded"),
        help=
        _("Defines whether the student gets credit for this problem. Credit is based on peer grades of this problem."
          ),
        default=False,
        scope=Scope.settings)
    student_attempts = Integer(
        help=_("Number of attempts taken by the student on this problem"),
        default=0,
        scope=Scope.user_state)
    ready_to_reset = Boolean(
        help=_("If the problem is ready to be reset or not."),
        default=False,
        scope=Scope.user_state)
    max_attempts = Integer(
        display_name=_("Maximum Attempts"),
        help=_(
            "The number of times the student can try to answer this problem."),
        default=1,
        scope=Scope.settings,
        values={"min": 1})
    accept_file_upload = Boolean(
        display_name=_("Allow File Uploads"),
        help=_("Whether or not the student can submit files as a response."),
        default=False,
        scope=Scope.settings)
    skip_spelling_checks = Boolean(
        display_name=_("Disable Quality Filter"),
        help=
        _("If False, the Quality Filter is enabled and submissions with poor spelling, short length, or poor grammar will not be peer reviewed."
          ),
        default=False,
        scope=Scope.settings)
    due = Date(help=_("Date that this problem is due by"),
               scope=Scope.settings)
    graceperiod = Timedelta(help=_(
        "Amount of time after the due date that submissions will be accepted"),
                            scope=Scope.settings)
    version = VersionInteger(help=_("Current version number"),
                             default=DEFAULT_VERSION,
                             scope=Scope.settings)
    data = String(help=_("XML data for the problem"),
                  scope=Scope.content,
                  default=DEFAULT_DATA)
    weight = Float(
        display_name=_("Problem Weight"),
        help=
        _("Defines the number of points each problem is worth. If the value is not set, each problem is worth one point."
          ),
        scope=Scope.settings,
        values={
            "min": 0,
            "step": ".1"
        },
        default=1)
    min_to_calibrate = Integer(
        display_name=_("Minimum Peer Grading Calibrations"),
        help=
        _("The minimum number of calibration essays each student will need to complete for peer grading."
          ),
        default=3,
        scope=Scope.settings,
        values={
            "min": 1,
            "max": 20,
            "step": "1"
        })
    max_to_calibrate = Integer(
        display_name=_("Maximum Peer Grading Calibrations"),
        help=
        _("The maximum number of calibration essays each student will need to complete for peer grading."
          ),
        default=6,
        scope=Scope.settings,
        values={
            "min": 1,
            "max": 20,
            "step": "1"
        })
    peer_grader_count = Integer(
        display_name=_("Peer Graders per Response"),
        help=_("The number of peers who will grade each submission."),
        default=3,
        scope=Scope.settings,
        values={
            "min": 1,
            "step": "1",
            "max": 5
        })
    required_peer_grading = Integer(
        display_name=_("Required Peer Grading"),
        help=
        _("The number of other students each student making a submission will have to grade."
          ),
        default=3,
        scope=Scope.settings,
        values={
            "min": 1,
            "step": "1",
            "max": 5
        })
    peer_grade_finished_submissions_when_none_pending = Boolean(
        display_name=_('Allow "overgrading" of peer submissions'),
        help=
        _("EXPERIMENTAL FEATURE.  Allow students to peer grade submissions that already have the requisite number of graders, "
          "but ONLY WHEN all submissions they are eligible to grade already have enough graders.  "
          "This is intended for use when settings for `Required Peer Grading` > `Peer Graders per Response`"
          ),
        default=False,
        scope=Scope.settings,
    )
    markdown = String(help=_("Markdown source of this module"),
                      default=textwrap.dedent("""\
                    [prompt]
                        <h3>Censorship in the Libraries</h3>

                        <p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
                        </p>

                        <p>
                        Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
                        </p>
                    [prompt]
                    [rubric]
                    + Ideas
                    - Difficult for the reader to discern the main idea.  Too brief or too repetitive to establish or maintain a focus.
                    - Attempts a main idea.  Sometimes loses focus or ineffectively displays focus.
                    - Presents a unifying theme or main idea, but may include minor tangents.  Stays somewhat focused on topic and task.
                    - Presents a unifying theme or main idea without going off on tangents.  Stays completely focused on topic and task.
                    + Content
                    - Includes little information with few or no details or unrelated details.  Unsuccessful in attempts to explore any facets of the topic.
                    - Includes little information and few or no details.  Explores only one or two facets of the topic.
                    - Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.)  Explores some facets of the topic.
                    - Includes in-depth information and exceptional supporting details that are fully developed.  Explores all facets of the topic.
                    + Organization
                    - Ideas organized illogically, transitions weak, and response difficult to follow.
                    - Attempts to logically organize ideas.  Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
                    - Ideas organized logically.  Progresses in an order that enhances meaning.  Includes smooth transitions.
                    + Style
                    - Contains limited vocabulary, with many words used incorrectly.  Demonstrates problems with sentence patterns.
                    - Contains basic vocabulary, with words that are predictable and common.  Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
                    - Includes vocabulary to make explanations detailed and precise.  Includes varied sentence patterns, including complex sentences.
                    + Voice
                    - Demonstrates language and tone that may be inappropriate to task and reader.
                    - Demonstrates an attempt to adjust language and tone to task and reader.
                    - Demonstrates effective adjustment of language and tone to task and reader.
                    [rubric]
                    [tasks]
                    (Self), ({4-12}AI), ({9-12}Peer)
                    [tasks]

        """),
                      scope=Scope.settings)
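The weight field above changes how many points the problem contributes. Assuming the platform's usual convention of scaling the raw score by the weight (the helper below is an illustrative sketch, not code from this module):

def weighted_score(raw_earned, raw_possible, weight=None):
    """Scale a raw score to the weighted points reported to the gradebook.

    With weight unset, the raw values pass through unchanged, matching the
    "each problem is worth one point" default described in the help text.
    """
    if weight is None or raw_possible == 0:
        return raw_earned, raw_possible
    return float(raw_earned) * weight / raw_possible, weight


# A half-correct response on a problem weighted to 4 points earns 2.0 points.
assert weighted_score(1, 2, weight=4) == (2.0, 4)
assert weighted_score(1, 2) == (1, 2)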
Example #28
0
class GenericXBlock(XBlock):
    """XBlock for testing pure xblock xml import"""
    has_children = True
    field1 = String(default="something", scope=Scope.user_state)
    field2 = Integer(scope=Scope.user_state)
Example #29
0
class BaseMentoringBlock(
        XBlock,
        XBlockWithTranslationServiceMixin,
        XBlockWithSettingsMixin,
        StudioEditableXBlockMixin,
        ThemableXBlockMixin,
        MessageParentMixin,
):
    """
    An XBlock that defines functionality shared by mentoring blocks.
    """
    # Content
    show_title = Boolean(display_name=_("Show title"),
                         help=_("Display the title?"),
                         default=True,
                         scope=Scope.content)
    max_attempts = Integer(
        display_name=_("Max. attempts allowed"),
        help=
        _("Maximum number of times students are allowed to attempt the questions belonging to this block"
          ),
        default=0,
        scope=Scope.content,
        enforce_type=True)
    weight = Float(display_name=_("Weight"),
                   help=_("Defines the maximum total grade of the block."),
                   default=1,
                   scope=Scope.settings,
                   enforce_type=True)

    # User state
    num_attempts = Integer(
        # Number of attempts the user has made on this block's questions
        default=0,
        scope=Scope.user_state,
        enforce_type=True)

    has_children = True
    has_score = True  # The Problem/Step Builder XBlocks produce scores. (Their children do not send scores to the LMS.)

    icon_class = 'problem'
    block_settings_key = 'mentoring'
    options_key = 'options'

    default_theme_config = {
        'package': 'problem_builder',
        'locations': ['public/themes/lms.css']
    }

    @property
    def url_name(self):
        """
        Get the url_name for this block. In Studio/LMS it is provided by a mixin, so we just
        defer to super(). In the workbench or any other platform, we use the usage_id.
        """
        try:
            return super(BaseMentoringBlock, self).url_name
        except AttributeError:
            return unicode(self.scope_ids.usage_id)

    @property
    def review_tips_json(self):
        return json.dumps(self.review_tips)

    @property
    def max_attempts_reached(self):
        return self.max_attempts > 0 and self.num_attempts >= self.max_attempts

    def get_content_titles(self):
        """
        By default, each Sequential block in a course ("Subsection" in Studio parlance) will
        display the display_name of each descendant in a tooltip above the content. We don't
        want that - we only want to display one title for this mentoring block as a whole.
        Otherwise things like "Choice (yes) (Correct)" will appear in the tooltip.

        If this block has no title set, don't display any title. Then, if this is the only block
        in the unit, the unit's title will be used. (Why isn't it always just used?)
        """
        has_explicitly_set_title = self.fields['display_name'].is_set_on(self)
        if has_explicitly_set_title:
            return [self.display_name]
        return []

    def get_options(self):
        """
        Get options settings for this block from settings service.

        Fall back on the default options if the XBlock settings have not been
        customized at all, or if no customizations are available for the options.
        """
        xblock_settings = self.get_xblock_settings(default={})
        if xblock_settings and self.options_key in xblock_settings:
            return xblock_settings[self.options_key]
        return _default_options_config

    def get_option(self, option):
        """
        Get value of a specific instance-wide `option`.
        """
        return self.get_options().get(option)

    @XBlock.json_handler
    def view(self, data, suffix=''):
        """
        Current HTML view of the XBlock, for refresh by client
        """
        frag = self.student_view({})
        return {'html': frag.content}

    @XBlock.json_handler
    def publish_event(self, data, suffix=''):
        """
        Publish data for analytics purposes
        """
        event_type = data.pop('event_type')
        if event_type == 'grade':
            # This handler can be called from the browser. Don't allow the browser to submit arbitrary grades ;-)
            raise JsonHandlerError(
                403, "Posting grade events from the browser is forbidden.")

        self.runtime.publish(self, event_type, data)
        return {'result': 'ok'}

    def author_preview_view(self, context):
        """
        Child blocks can override this to add a custom preview shown to
        authors in Studio when not editing this block's children.
        """
        fragment = self.student_view(context)
        fragment.add_content(
            loader.render_template('templates/html/mentoring_url_name.html',
                                   {"url_name": self.url_name}))
        fragment.add_css_url(
            self.runtime.local_resource_url(
                self, 'public/css/problem-builder-edit.css'))
        self.include_theme_files(fragment)
        return fragment

    def max_score(self):
        """ Maximum score. We scale all scores to a maximum of 1.0 so this is always 1.0 """
        return 1.0
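max_attempts_reached above treats a max_attempts of 0 as unlimited, so the limit can never be hit in that case. A minimal standalone sketch of that check; the function name is illustrative only:

def attempts_exhausted(num_attempts, max_attempts):
    """Return True once a finite attempt limit has been used up.

    Mirrors BaseMentoringBlock.max_attempts_reached: max_attempts == 0
    means unlimited attempts, so the check never trips.
    """
    return max_attempts > 0 and num_attempts >= max_attempts


assert attempts_exhausted(num_attempts=3, max_attempts=3) is True
assert attempts_exhausted(num_attempts=3, max_attempts=0) is False  # unlimited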
Example #30
0
class SecondMixin(XBlockMixin):
    """Test class for mixin ordering."""
    number = 2
    field = Integer(default=2)