Example #1
0
class CombinedOpenEndedFields(object):
    """Field definitions for the Combined Open Ended (ORA) module.

    Fields fall into two groups: instructor-authored configuration
    (``Scope.settings`` / ``Scope.content``) and per-student progress
    (``Scope.user_state``). Defaults such as DEFAULT_VERSION and
    DEFAULT_DATA are defined elsewhere in this module.
    """
    # Title shown in Studio and in the course's horizontal navigation.
    display_name = String(
        display_name=_("Display Name"),
        help=
        _("This name appears in the horizontal navigation at the top of the page."
          ),
        default=_("Open Response Assessment"),
        scope=Scope.settings)
    # Index of the task the student is currently working on.
    current_task_number = Integer(
        help=_("Current task that the student is on."),
        default=0,
        scope=Scope.user_state)
    # Preserved per-student state from before an instructor edit; only
    # populated when tasks change after students have already attempted.
    old_task_states = List(
        help=
        _("A list of lists of state dictionaries for student states that are saved. "
          "This field is only populated if the instructor changes tasks after "
          "the module is created and students have attempted it (for example, if a self assessed problem is "
          "changed to self and peer assessed)."),
        scope=Scope.user_state,
    )
    # Live per-student state, one dictionary per task in this module.
    task_states = List(
        help=_("List of state dictionaries of each task within this module."),
        scope=Scope.user_state)
    # Step within the current task (starts at "initial").
    state = String(
        help=_("Which step within the current task that the student is on."),
        default="initial",
        scope=Scope.user_state)
    # Whether this problem contributes to the grade (via peer grading).
    graded = Boolean(
        display_name=_("Graded"),
        help=
        _("Defines whether the student gets credit for this problem. Credit is based on peer grades of this problem."
          ),
        default=False,
        scope=Scope.settings)
    # Attempt counter for this student.
    student_attempts = Integer(
        help=_("Number of attempts taken by the student on this problem"),
        default=0,
        scope=Scope.user_state)
    # Set when the student may reset the problem and start over.
    ready_to_reset = Boolean(
        help=_("If the problem is ready to be reset or not."),
        default=False,
        scope=Scope.user_state)
    # Instructor-configured attempt limit (minimum 1).
    max_attempts = Integer(
        display_name=_("Maximum Attempts"),
        help=_(
            "The number of times the student can try to answer this problem."),
        default=1,
        scope=Scope.settings,
        values={"min": 1})
    # Whether students may attach file uploads to their responses.
    accept_file_upload = Boolean(
        display_name=_("Allow File Uploads"),
        help=_("Whether or not the student can submit files as a response."),
        default=False,
        scope=Scope.settings)
    # When True, the quality filter (spelling/length/grammar screen that
    # gates peer review) is disabled.
    skip_spelling_checks = Boolean(
        display_name=_("Disable Quality Filter"),
        help=
        _("If False, the Quality Filter is enabled and submissions with poor spelling, short length, or poor grammar will not be peer reviewed."
          ),
        default=False,
        scope=Scope.settings)
    # Due date for the problem.
    due = Date(help=_("Date that this problem is due by"),
               scope=Scope.settings)
    # Extra time after `due` during which submissions are still accepted.
    graceperiod = Timedelta(help=_(
        "Amount of time after the due date that submissions will be accepted"),
                            scope=Scope.settings)
    # Schema/version marker for stored data.
    version = VersionInteger(help=_("Current version number"),
                             default=DEFAULT_VERSION,
                             scope=Scope.settings)
    # Raw XML definition of the problem.
    data = String(help=_("XML data for the problem"),
                  scope=Scope.content,
                  default=DEFAULT_DATA)
    # Point value of the problem; one point when unset.
    weight = Float(
        display_name=_("Problem Weight"),
        help=
        _("Defines the number of points each problem is worth. If the value is not set, each problem is worth one point."
          ),
        scope=Scope.settings,
        values={
            "min": 0,
            "step": ".1"
        },
        default=1)
    # Lower bound of calibration essays each student completes before
    # peer grading.
    min_to_calibrate = Integer(
        display_name=_("Minimum Peer Grading Calibrations"),
        help=
        _("The minimum number of calibration essays each student will need to complete for peer grading."
          ),
        default=3,
        scope=Scope.settings,
        values={
            "min": 1,
            "max": 20,
            "step": "1"
        })
    # Upper bound of calibration essays (see min_to_calibrate).
    max_to_calibrate = Integer(
        display_name=_("Maximum Peer Grading Calibrations"),
        help=
        _("The maximum number of calibration essays each student will need to complete for peer grading."
          ),
        default=6,
        scope=Scope.settings,
        values={
            "min": 1,
            "max": 20,
            "step": "1"
        })
    # How many peers must grade each submission.
    peer_grader_count = Integer(
        display_name=_("Peer Graders per Response"),
        help=_("The number of peers who will grade each submission."),
        default=3,
        scope=Scope.settings,
        values={
            "min": 1,
            "step": "1",
            "max": 5
        })
    # How many submissions each submitting student must grade in return.
    required_peer_grading = Integer(
        display_name=_("Required Peer Grading"),
        help=
        _("The number of other students each student making a submission will have to grade."
          ),
        default=3,
        scope=Scope.settings,
        values={
            "min": 1,
            "step": "1",
            "max": 5
        })
    # Experimental: allow grading beyond the required grader count when no
    # other eligible submissions remain.
    peer_grade_finished_submissions_when_none_pending = Boolean(
        display_name=_('Allow "overgrading" of peer submissions'),
        help=
        _("EXPERIMENTAL FEATURE.  Allow students to peer grade submissions that already have the requisite number of graders, "
          "but ONLY WHEN all submissions they are eligible to grade already have enough graders.  "
          "This is intended for use when settings for `Required Peer Grading` > `Peer Graders per Response`"
          ),
        default=False,
        scope=Scope.settings,
    )
    # Markdown source used by the Studio editor; the default is a complete
    # example prompt/rubric/tasks definition. NOTE: the dedent string below
    # is whitespace-sensitive — do not re-indent it.
    markdown = String(help=_("Markdown source of this module"),
                      default=textwrap.dedent("""\
                    [prompt]
                        <h3>Censorship in the Libraries</h3>

                        <p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
                        </p>

                        <p>
                        Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
                        </p>
                    [prompt]
                    [rubric]
                    + Ideas
                    - Difficult for the reader to discern the main idea.  Too brief or too repetitive to establish or maintain a focus.
                    - Attempts a main idea.  Sometimes loses focus or ineffectively displays focus.
                    - Presents a unifying theme or main idea, but may include minor tangents.  Stays somewhat focused on topic and task.
                    - Presents a unifying theme or main idea without going off on tangents.  Stays completely focused on topic and task.
                    + Content
                    - Includes little information with few or no details or unrelated details.  Unsuccessful in attempts to explore any facets of the topic.
                    - Includes little information and few or no details.  Explores only one or two facets of the topic.
                    - Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.)  Explores some facets of the topic.
                    - Includes in-depth information and exceptional supporting details that are fully developed.  Explores all facets of the topic.
                    + Organization
                    - Ideas organized illogically, transitions weak, and response difficult to follow.
                    - Attempts to logically organize ideas.  Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
                    - Ideas organized logically.  Progresses in an order that enhances meaning.  Includes smooth transitions.
                    + Style
                    - Contains limited vocabulary, with many words used incorrectly.  Demonstrates problems with sentence patterns.
                    - Contains basic vocabulary, with words that are predictable and common.  Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
                    - Includes vocabulary to make explanations detailed and precise.  Includes varied sentence patterns, including complex sentences.
                    + Voice
                    - Demonstrates language and tone that may be inappropriate to task and reader.
                    - Demonstrates an attempt to adjust language and tone to task and reader.
                    - Demonstrates effective adjustment of language and tone to task and reader.
                    [rubric]
                    [tasks]
                    (Self), ({4-12}AI), ({9-12}Peer)
                    [tasks]

        """),
                      scope=Scope.settings)
Example #2
0
class FeedbackXBlock(XBlock):
    """
    This is an XBlock -- eventually, hopefully an aside -- which
    allows you to collect feedback on content in the course. We've wanted
    this for a long time, but Dartmouth finally encouraged me to start to
    build this.
    """
    # This is a list of prompts. If we have multiple elements in the
    # list, one will be chosen at random. This is currently not
    # exposed in the UX. If the prompt is missing any portions, we
    # will default to the ones in default_prompt.
    prompts = List(
        default=[
            {'freeform': DEFAULT_FREEFORM,
             'default_text': DEFAULT_DEFAULT,
             'likert': DEFAULT_LIKERT,
             'placeholder': DEFAULT_PLACEHOLDER,
             'scale_text': DEFAULT_SCALETEXT,
             'icon_set': DEFAULT_ICON}
        ],
        scope=Scope.settings,
        help="Freeform user prompt",
        xml_node=True
    )

    # Index into `prompts` chosen for this student; -1 until student_view
    # initializes it.
    prompt_choice = Integer(
        default=-1, scope=Scope.user_state,
        help="Random number generated for p. -1 if uninitialized"
    )

    user_vote = Integer(
        default=-1, scope=Scope.user_state,
        help="How user voted. -1 if didn't vote"
    )

    # pylint: disable=invalid-name
    p = Float(
        default=100, scope=Scope.settings,
        help="What percent of the time should this show?"
    )

    p_user = Float(
        default=-1, scope=Scope.user_state,
        help="Random number generated for p. -1 if uninitialized"
    )

    vote_aggregate = List(
        default=None, scope=Scope.user_state_summary,
        help="A list of user votes"
    )

    user_freeform = String(default="", scope=Scope.user_state,
                           help="Feedback")

    display_name = String(
        display_name="Display Name",
        default="Provide Feedback",
        # BUG FIX: this keyword was misspelled `scopde=`, which silently left
        # the field at its default scope instead of Scope.settings.
        scope=Scope.settings
    )

    @classmethod
    def resource_string(cls, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def get_prompt(self, index=-1):
        """
        Return the current prompt dictionary, doing appropriate
        randomization if necessary, and falling back to defaults when
        necessary.
        """
        if index == -1:
            index = self.prompt_choice

        _ = self.runtime.service(self, 'i18n').ugettext
        # This is the default prompt if something is not specified in the
        # settings dictionary. Note that this is not the same as the default
        # above. The default above is the prompt the instructor starts from
        # in a tool like Studio. This is a fallback in case some JSON fields
        # are left unpopulated (e.g. if someone manually tweaks the database,
        # in case of OLX authoring, and similar). The exemplar above is
        # intended as a well-structured, coherent response. This is designed
        # as generic, to work with any content as a safe fallback.
        prompt = {
            'freeform': _("Please reflect on this course material"),
            'default_text': _("Please take time to meaningfully reflect "
                              "on your experience with this course "
                              "material."),
            'likert': _("Please rate your overall experience"),
            'scale_text': [_("Excellent"),
                           _("Good"),
                           _("Average"),
                           _("Fair"),
                           _("Poor")],
            'icon_set': 'num',
            'placeholder': "Please take a moment to thoughtfully reflect."
        }

        prompt.update(self.prompts[index])
        return prompt

    def student_view(self, context=None):
        """
        The primary view of the FeedbackXBlock, shown to students
        when viewing courses.
        """
        # Figure out which prompt we show. We set self.prompt_choice to
        # the index of the prompt. We set it if it is out of range (either
        # uninitialized, or incorrect due to changing list length). Then,
        # we grab the prompt, prepopulated with defaults.
        if self.prompt_choice < 0 or self.prompt_choice >= len(self.prompts):
            self.prompt_choice = random.randint(0, len(self.prompts) - 1)
        prompt = self.get_prompt()

        # Now, we render the FeedbackXBlock.
        html = self.resource_string("static/html/feedback.html")

        # Staff see vote totals, so we have slightly different HTML here.
        if self.vote_aggregate and self.is_staff():
            scale_item = self.resource_string("static/html/staff_item.html")
        else:
            scale_item = self.resource_string("static/html/scale_item.html")
        # The replace allows us to format the HTML nicely without getting
        # extra whitespace
        scale_item = scale_item.replace('\n', '')

        # We have five Likert fields right now, but we'd like this to
        # be dynamic
        indexes = range(5)

        # If the user voted before, we'd like to show that
        active_vote = ["checked" if i == self.user_vote else ""
                       for i in indexes]

        # Confirm that we do have vote totals (this may be uninitialized
        # otherwise). This should probably go into __init__ or similar.
        self.init_vote_aggregate()
        votes = self.vote_aggregate

        # We grab the icons. This should move to a Filesystem field so
        # instructors can upload new ones
        def get_url(icon_type, i):
            '''
            Helper function to generate the URL for the icons shown in the
            tool. Takes the type of icon (active, inactive, etc.) and
            the number of the icon.

            Note that some icon types may not be actively used in the
            styling. For example, at the time of this writing, we do
            selected through CSS, rather than by using those icons.
            '''
            templates = {'inactive': 'public/default_icons/i{set}{i}.png?raw',
                         'active': 'public/default_icons/a{set}{i}.png?raw',
                         'selected': 'public/default_icons/s{set}{i}.png?raw'}
            template = templates[icon_type]
            icon_file = template.format(i=i, set=prompt['icon_set'])
            return self.runtime.local_resource_url(self, icon_file)
        ina_urls = [get_url('inactive', i) for i in range(1, 6)]
        act_urls = [get_url('active', i) for i in range(1, 6)]
        sel_urls = [get_url('selected', i) for i in range(1, 6)]

        # Render the Likert scale (not the whole page)
        scale = u"".join(
            scale_item.format(scale_text=scale_text,
                              unicode_icon=unicode_icon,
                              idx=idx,
                              active=active,
                              vote_cnt=vote_cnt,
                              ina_icon=ina_icon,
                              act_icon=act_icon,
                              sel_icon=sel_icon) for
            (scale_text,
             unicode_icon,
             idx,
             active,
             vote_cnt,
             act_icon,
             ina_icon,
             sel_icon) in
            zip(prompt['scale_text'],
                ICON_SETS[(prompt['icon_set'])],
                indexes,
                active_vote,
                votes,
                act_urls,
                ina_urls,
                sel_urls)
        )
        if self.user_vote != -1:
            _ = self.runtime.service(self, 'i18n').ugettext
            response = _("Thank you for voting!")
        else:
            response = ""
        # Now, render the whole page
        rendered = html.format(self=self,
                               scale=scale,
                               freeform_prompt=prompt['freeform'],
                               likert_prompt=prompt['likert'],
                               response=response,
                               placeholder=prompt['placeholder'])

        # We initialize self.p_user if not initialized -- this sets whether
        # or not we show it. From there, if it is less than odds of showing,
        # we set the fragment to the rendered XBlock. Otherwise, we return
        # empty HTML. There ought to be a way to return None, but XBlocks
        # doesn't support that.
        if self.p_user == -1:
            self.p_user = random.uniform(0, 100)
        if self.p_user < self.p:
            frag = Fragment(rendered)
        else:
            frag = Fragment(u"")

        # Finally, we do the standard JS+CSS boilerplate. Honestly, XBlocks
        # ought to have a sane default here.
        frag.add_css(self.resource_string("static/css/feedback.css"))
        frag.add_javascript(self.resource_string("static/js/src/feedback.js"))
        frag.initialize_js('FeedbackXBlock')
        return frag

    def studio_view(self, context):
        """
        Create a fragment used to display the edit view in the Studio.
        """
        html_str = self.resource_string("static/html/studio_view.html")
        prompt = self.get_prompt(0)
        # Flatten scale_text into likert0..likertN keys for the template.
        for idx in range(len(prompt['scale_text'])):
            prompt['likert{i}'.format(i=idx)] = prompt['scale_text'][idx]
        frag = Fragment(unicode(html_str).format(**prompt))
        js_str = self.resource_string("static/js/src/studio.js")
        frag.add_javascript(unicode(js_str))
        frag.initialize_js('FeedbackBlock',
                           {'icon_set': prompt['icon_set']})
        return frag

    @XBlock.json_handler
    def studio_submit(self, data, suffix=''):
        """
        Called when submitting the form in Studio.
        """
        for item in ['freeform', 'likert', 'placeholder', 'icon_set']:
            item_submission = data.get(item, None)
            if item_submission and len(item_submission) > 0:
                self.prompts[0][item] = cgi.escape(item_submission)
        for i in range(5):
            likert = data.get('likert{i}'.format(i=i), None)
            if likert and len(likert) > 0:
                self.prompts[0]['scale_text'][i] = cgi.escape(likert)

        return {'result': 'success'}

    def init_vote_aggregate(self):
        '''
        There are a lot of places we read the aggregate vote counts. We
        start out with these uninitialized. This guarantees they are
        initialized. We'd prefer to do it this way, rather than default
        value, since we do plan to not force scale length to be 5 in the
        future.
        '''
        if not self.vote_aggregate:
            self.vote_aggregate = [0] * (len(self.get_prompt()['scale_text']))

    def vote(self, data):
        """
        Handle voting
        """
        # prompt_choice is initialized by student view.
        # Ideally, we'd break this out into a function.
        prompt = self.get_prompt(self.prompt_choice)

        # Make sure we're initialized
        self.init_vote_aggregate()

        # Remove old vote if we voted before
        if self.user_vote != -1:
            self.vote_aggregate[self.user_vote] -= 1

        self.user_vote = data['vote']
        self.vote_aggregate[self.user_vote] += 1

    @XBlock.json_handler
    def feedback(self, data, suffix=''):
        '''
        Allow students to submit feedback, both numerical and
        qualitative. We only update the specific type of feedback
        submitted.

        We return the current state. While this is not used by the
        client code, it is helpful for testing. For staff users, we
        also return the aggregate results.
        '''
        _ = self.runtime.service(self, 'i18n').ugettext

        if 'freeform' not in data and 'vote' not in data:
            response = {"success": False,
                        "response": _("Please vote!")}
            self.runtime.publish(self,
                                 'edx.feedbackxblock.nothing_provided',
                                 {})
        if 'vote' in data:
            response = {"success": True,
                        "response": _("Thank you for voting!")}
            self.runtime.publish(self,
                                 'edx.feedbackxblock.likert_provided',
                                 {'old_vote': self.user_vote,
                                  'new_vote': data['vote']})
            self.vote(data)
        if 'freeform' in data:
            response = {"success": True,
                        "response": _("Thank you for your feedback!")}
            self.runtime.publish(self,
                                 'edx.feedbackxblock.freeform_provided',
                                 {'old_freeform': self.user_freeform,
                                  'new_freeform': data['freeform']})
            self.user_freeform = data['freeform']

        response.update({
            "freeform": self.user_freeform,
            "vote": self.user_vote
        })

        if self.is_staff():
            response['aggregate'] = self.vote_aggregate

        return response

    @staticmethod
    def workbench_scenarios():
        """
        A canned scenario for display in the workbench.

        We have three blocks. One shows up all the time (for testing). The
        other two show up 50% of the time.
        """
        return [
            ("FeedbackXBlock",
             """<vertical_demo>
                <feedback p="100"/>
                <feedback p="50"/>
                <feedback p="50"/>
                </vertical_demo>
             """),
        ]

    def is_staff(self):
        """
        Return self.xmodule_runtime.user_is_staff if available

        This is not a supported part of the XBlocks API in all
        runtimes, and this is a workaround so something reasonable
        happens in both workbench and edx-platform
        """
        if hasattr(self, "xmodule_runtime") and \
           hasattr(self.xmodule_runtime, "user_is_staff"):
            return self.xmodule_runtime.user_is_staff
        else:
            # In workbench and similar settings, always return true
            return True
Example #3
0
class HighlightsFields(object):
    """Only Sections have summaries now, but we may expand that later."""
    # Instructor-authored summary bullets for a section (course settings).
    highlights = List(
        help=_("A list summarizing what students should look forward to in this section."),
        scope=Scope.settings,
    )
Example #4
0
class PlatformTourXBlock(XBlock):
    """
    Allows students to tour through the course and get familiar with the
    platform.
    """

    loader = ResourceLoader(__name__)

    # Title shown for this component.
    # NOTE(review): the parentheses around these field strings are no-ops
    # (not translation calls); preserved as-is.
    display_name = String(
        display_name=('Display Name'),
        help=('The title for this component'),
        default='Platform Tour',
        scope=Scope.settings,
    )
    # Label of the button that launches the tour.
    button_label = String(
        display_name=('Button label'),
        help=('The text that will appear on the button on which learners click'
              ' to start the Platform Tour.'),
        default='Begin Platform Tour',
        scope=Scope.settings,
    )
    # Text displayed above the button to explain it.
    intro = String(
        display_name=('Introduction text'),
        help=('The introduction that will precede the button'
              ' and explain its presence to the user'),
        default='Click the button below to learn how to navigate the platform.',
        scope=Scope.settings,
    )
    # Keys of the built-in steps enabled for this tour; None means
    # "use the defaults from default_steps".
    enabled_default_steps = List(
        display_name=('Choose the steps for the Platform Tour'),
        help=('List representing steps of the tour'),
        default=None,
        multiline_editor=True,
        scope=Scope.settings,
        resettable_editor=False,
    )
    # Extra instructor-defined steps, stored as JSON dictionaries.
    custom_steps = List(
        display_name=('Custom steps for the platform tour'),
        help=('JSON dictionaries representing additional steps of the tour'),
        default=[],
        multiline_editor=True,
        scope=Scope.settings,
    )

    def get_resource_url(self, path):
        """
        Retrieve a public URL for the file path
        """
        return self.runtime.local_resource_url(self, os.path.join('public', path))

    def build_fragment(
        self,
        rendered_template,
        initialize_js_func,
        additional_css=None,
        additional_js=None,
    ):
        """
        Build the HTML fragment, and add required static assets to it.
        """
        fragment = Fragment(rendered_template)
        # CSS first, then JS, mirroring how browsers should load assets.
        for css_path in (additional_css or []):
            fragment.add_css_url(self.get_resource_url(css_path))
        for js_path in (additional_js or []):
            fragment.add_javascript_url(self.get_resource_url(js_path))
        fragment.initialize_js(initialize_js_func)
        return fragment

    def student_view(self, context=None):
        """
        The primary view of the PlatformTourXBlock, shown to students
        when viewing courses.
        """
        # Fall back to the stock step keys when the instructor has not
        # customized the list.
        step_keys = self.enabled_default_steps
        if step_keys is None:
            step_keys = default_steps.get_default_keys()
        display_steps = default_steps.get_display_steps(step_keys)
        # The sentinel key 'custom' splices in instructor-authored steps.
        if 'custom' in step_keys:
            display_steps.extend(self.custom_steps)

        context = context or {}
        context.update({
            'display_name': self.display_name,
            'button_label': self.button_label,
            'intro': self.intro,
            'steps': json.dumps(display_steps),
        })
        rendered = self.loader.render_django_template(
            'templates/platformtour.html',
            context=Context(context),
        )
        return self.build_fragment(
            rendered,
            initialize_js_func='PlatformTourXBlock',
            additional_css=['css/platformtour.css'],
            additional_js=['js/src/intro.js', 'js/src/platformtour.js'],
        )

    def studio_view(self, context=None):
        """
        Build the fragment for the edit/studio view
        Implementation is optional.
        """
        step_keys = self.enabled_default_steps
        if step_keys is None:
            step_keys = default_steps.get_default_keys()
        context = context or {}
        context.update({
            'display_name': self.display_name,
            'button_label': self.button_label,
            'intro': self.intro,
            'enabled_default_steps': default_steps.get_choices(step_keys),
            'custom_steps': json.dumps(self.custom_steps),
        })
        rendered = self.loader.render_django_template(
            'templates/platformtour_studio.html',
            context=Context(context),
        )
        return self.build_fragment(
            rendered,
            initialize_js_func='PlatformTourStudioUI',
            additional_css=['css/platformtour_studio.css'],
            additional_js=['js/src/platformtour_studio.js'],
        )

    @XBlock.json_handler
    def studio_view_save(self, data, suffix=''):
        """
        Save XBlock fields
        Returns: the new field values
        """
        editable_fields = (
            'display_name',
            'button_label',
            'intro',
            'enabled_default_steps',
            'custom_steps',
        )
        for field_name in editable_fields:
            setattr(self, field_name, data[field_name])
        return {field_name: getattr(self, field_name)
                for field_name in editable_fields}

    # TO-DO: change this to create the scenarios you'd like to see in the
    # workbench while developing your XBlock.
    @staticmethod
    def workbench_scenarios():
        """
        A canned scenario for display in the workbench.
        """
        return [
            ("PlatformTourXBlock", """<platformtour/>
             """),
            ("Multiple PlatformTourXBlock", """<vertical_demo>
                    <platformtour
                        display_name="Platform Tour 1"
                        button_label="Start Tour #1"
                        intro="This is the Platform Tour #1, click the button to start."
                    />
                    <platformtour
                        display_name="Platform Tour 2"
                        button_label="Start Tour #2"
                        intro="This is the Platform Tour #2, click the button to start."
                    />
                    <platformtour
                        display_name="Platform Tour 3"
                        button_label="Start Tour #3"
                        intro="This is the Platform Tour #3, click the button to start."
                    />
                </vertical_demo>
             """),
        ]
class LibraryContentFields(object):
    """
    Fields for the LibraryContentModule.

    Separated out for now because they need to be added to the module and the
    descriptor.
    """
    # Please note the display_name of each field below is used in
    # common/test/acceptance/pages/studio/library.py:StudioLibraryContentXBlockEditModal
    # to locate input elements - keep synchronized
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        default="Randomized Content Block",
        scope=Scope.settings,
    )
    # The library content is drawn from; resolved lazily via the
    # values_provider so the option list reflects available libraries.
    source_library_id = String(
        display_name=_("Library"),
        help=_("Select the library from which you want to draw content."),
        scope=Scope.settings,
        values_provider=lambda instance: instance.source_library_values(),
    )
    # Hidden bookkeeping field: the version of source_library content was
    # last pulled from.
    source_library_version = String(
        display_name=_("Library Version"),
        scope=Scope.settings,
    )
    # Selection strategy; "random" is currently the only supported value.
    mode = String(
        display_name=_("Mode"),
        help=_("Determines how content is drawn from the library"),
        default="random",
        values=[
            {
                "display_name": _("Choose n at random"),
                "value": "random",
            },
            # Future addition: Choose a new random set of n every time the
            # student refreshes the block, for self tests
            # Future addition: manually selected blocks
        ],
        scope=Scope.settings,
    )
    # How many components each student sees.
    max_count = Integer(
        display_name=_("Count"),
        help=_("Enter the number of components to display to each student."),
        default=1,
        scope=Scope.settings,
    )
    # Optional restriction to a single CAPA problem type.
    capa_type = String(
        display_name=_("Problem Type"),
        help=_('Choose a problem type to fetch from the library. If "Any Type" is selected no filtering is applied.'),
        default=ANY_CAPA_TYPE_VALUE,
        values=_get_capa_types(),
        scope=Scope.settings,
    )
    filters = String(default="")  # TBD
    # Whether this block counts toward the grade.
    has_score = Boolean(
        display_name=_("Scored"),
        help=_("Set this value to True if this module is either a graded assignment or a practice problem."),
        default=False,
        scope=Scope.settings,
    )
    # Per-user record of which (block_type, block_id) tuples were chosen
    # as this student's random/first set of matching blocks.
    selected = List(
        default=[],
        scope=Scope.user_state,
    )
    has_children = True

    @property
    def source_library_key(self):
        """
        Convenience method to get the library ID as a LibraryLocator and not just a string
        """
        return LibraryLocator.from_string(self.source_library_id)
Example #6
0
class XBlock(XmlSerializationMixin, HierarchyMixin, ScopedStorageMixin, RuntimeServicesMixin, HandlersMixin,
             IndexInfoMixin, ViewsMixin, SharedBlockBase):
    """Base class for XBlocks.

    Subclass this to implement a new kind of XBlock.  No method is strictly
    required, but most blocks will define at least one view.

    Do not define an ``__init__`` method when deriving from this class.
    """
    entry_point = 'xblock.v1'

    name = String(help="Short name for the block", scope=Scope.settings)
    tags = List(help="Tags for this block", scope=Scope.settings)

    @class_lazy
    def _class_tags(cls):  # pylint: disable=no-self-argument
        """
        Collect the tags contributed by every base class.
        """
        # pylint: disable=no-member
        collected = set()
        for ancestor in cls.mro()[1:]:
            collected.update(getattr(ancestor, '_class_tags', set()))
        return collected

    @staticmethod
    def tag(tags):
        """Returns a function that adds the words in `tags` as class tags to this class."""
        words = tags.replace(",", " ").split()

        def dec(cls):
            """Add the parsed words as class tags to the decorated class."""
            cls._class_tags.update(words)  # pylint: disable=protected-access
            return cls

        return dec

    @classmethod
    def load_tagged_classes(cls, tag, fail_silently=True):
        """
        Produce a sequence of all XBlock classes tagged with `tag`.

        `fail_silently` causes the code to simply log warnings if a
        plugin cannot import. The goal is to be able to use part of
        libraries from an XBlock (and thus have it installed), even if
        the overall XBlock cannot be used (e.g. depends on Django in a
        non-Django application). There is disagreement about whether
        this is a good idea, or whether we should see failures early
        (e.g. on startup or first page load), and in what
        contexts. Hence, the flag.
        """
        # Allow this method to access each plugin's `_class_tags`
        # pylint: disable=W0212
        for plugin_name, plugin_class in cls.load_classes(fail_silently):
            if tag in plugin_class._class_tags:
                yield plugin_name, plugin_class

    # pylint: disable=keyword-arg-before-vararg
    def __init__(self, runtime, field_data=None, scope_ids=UNSET, *args, **kwargs):
        """
        Construct a new XBlock.

        Only runtimes should instantiate this class.

        Arguments:

            runtime (:class:`.Runtime`): Use it to access the environment.
                It is available in XBlock code as ``self.runtime``.

            field_data (:class:`.FieldData`): Interface used by the XBlock
                fields to access their data from wherever it is persisted.
                Deprecated.

            scope_ids (:class:`.ScopeIds`): Identifiers needed to resolve
                scopes.  Effectively required; the UNSET sentinel default
                only exists so `field_data` can stay in positional order.
        """
        if scope_ids is UNSET:
            raise TypeError('scope_ids are required')

        # Provide backwards compatibility for external access through _field_data
        super(XBlock, self).__init__(runtime=runtime, scope_ids=scope_ids, field_data=field_data, *args, **kwargs)

    def render(self, view, context=None):
        """Render `view` with this block's runtime and the supplied `context`."""
        return self.runtime.render(self, view, context)

    def validate(self):
        """
        Ask this xblock to validate itself.

        This base implementation is a no-op; subclasses overriding it should
        call super() to collect results from their superclasses and then add
        any additional results of their own.
        """
        return Validation(self.scope_ids.usage_id)

    def ugettext(self, text):
        """
        Translate `text` and return it as a unicode string, using the
        runtime's "i18n" service.
        """
        i18n_service = self.runtime.service(self, "i18n")
        return i18n_service.ugettext(text)

    def add_xml_to_node(self, node):
        """
        For exporting, set data on etree.Element `node`.
        """
        super(XBlock, self).add_xml_to_node(node)
        # Serialize each of our children under the same node.
        self.add_children_to_node(node)
Пример #7
0
class LibraryContentBlock(
    MakoTemplateBlockBase,
    XmlMixin,
    XModuleDescriptorToXBlockMixin,
    XModuleToXBlockMixin,
    HTMLSnippet,
    ResourceTemplates,
    XModuleMixin,
    StudioEditableBlock,
):
    """
    An XBlock whose children are chosen dynamically from a content library.
    Can be used to create randomized assessments among other things.

    Note: technically, all matching blocks from the content library are added
    as children of this block, but only a subset of those children are shown to
    any particular student.
    """
    # pylint: disable=abstract-method
    has_children = True
    has_author_view = True

    # Static assets served for this block type.
    resources_dir = 'assets/library_content'

    # JS/CSS bundles for the LMS preview rendering.
    preview_view_js = {
        'js': [],
        'xmodule_js': resource_string(__name__, 'js/src/xmodule.js'),
    }
    preview_view_css = {
        'scss': [],
    }

    # Studio editing uses the generic metadata-edit template with the
    # VerticalDescriptor JS shim.
    mako_template = 'widgets/metadata-edit.html'
    studio_js_module_name = "VerticalDescriptor"
    studio_view_js = {
        'js': [
            resource_string(__name__, 'js/src/vertical/edit.js'),
        ],
        'xmodule_js': resource_string(__name__, 'js/src/xmodule.js'),
    }
    studio_view_css = {
        'scss': [],
    }

    show_in_read_only_mode = True

    # Completion is aggregated from the child blocks rather than tracked here.
    completion_mode = XBlockCompletionMode.AGGREGATOR

    display_name = String(
        display_name=_("Display Name"),
        help=_("The display name for this component."),
        default="Randomized Content Block",
        scope=Scope.settings,
    )
    # Which content library to draw children from.
    source_library_id = String(
        display_name=_("Library"),
        help=_("Select the library from which you want to draw content."),
        scope=Scope.settings,
        values_provider=lambda instance: instance.source_library_values(),
    )
    source_library_version = String(
        # This is a hidden field that stores the version of source_library when we last pulled content from it
        display_name=_("Library Version"),
        scope=Scope.settings,
    )
    # How children are drawn from the library; "random" is the only mode
    # currently offered.
    mode = String(
        display_name=_("Mode"),
        help=_("Determines how content is drawn from the library"),
        default="random",
        values=[
            {"display_name": _("Choose n at random"), "value": "random"}
            # Future addition: Choose a new random set of n every time the student refreshes the block, for self tests
            # Future addition: manually selected blocks
        ],
        scope=Scope.settings,
    )
    # Number of child components shown to each individual student.
    max_count = Integer(
        display_name=_("Count"),
        help=_("Enter the number of components to display to each student."),
        default=1,
        scope=Scope.settings,
    )
    # Optional filter limiting which CAPA problem types are drawn from the library.
    capa_type = String(
        display_name=_("Problem Type"),
        help=_('Choose a problem type to fetch from the library. If "Any Type" is selected no filtering is applied.'),
        default=ANY_CAPA_TYPE_VALUE,
        values=_get_capa_types(),
        scope=Scope.settings,
    )
    selected = List(
        # This is a list of (block_type, block_id) tuples used to record
        # which random/first set of matching blocks was selected per user
        default=[],
        scope=Scope.user_state,
    )

    @property
    def source_library_key(self):
        """Return ``source_library_id`` parsed into a :class:`LibraryLocator`."""
        raw_library_id = self.source_library_id
        return LibraryLocator.from_string(raw_library_id)

    @classmethod
    def make_selection(cls, selected, children, max_count, mode):
        """
        Dynamically selects block_ids indicating which of the possible children are displayed to the current user.

        Arguments:
            selected - list of (block_type, block_id) tuples assigned to this student
            children - children of this block
            max_count - number of components to display to each student
            mode - how content is drawn from the library (only "random" is supported)

        Returns:
            A dict containing the following keys:

            'selected' (set) of (block_type, block_id) tuples assigned to this student
            'invalid' (set) of dropped (block_type, block_id) tuples that are no longer valid
            'overlimit' (set) of dropped (block_type, block_id) tuples that were previously selected
            'added' (set) of newly added (block_type, block_id) tuples

        Raises:
            NotImplementedError: if `mode` is anything other than "random".
        """
        rand = random.Random()

        selected_keys = set(tuple(k) for k in selected)  # set of (block_type, block_id) tuples assigned to this student

        # Determine which of our children we will show:
        valid_block_keys = set((c.block_type, c.block_id) for c in children)

        # Remove any selected blocks that are no longer valid:
        invalid_block_keys = (selected_keys - valid_block_keys)
        if invalid_block_keys:
            selected_keys -= invalid_block_keys

        # If max_count has been decreased, we may have to drop some previously selected blocks:
        overlimit_block_keys = set()
        if len(selected_keys) > max_count:
            num_to_remove = len(selected_keys) - max_count
            # NOTE: random.sample() requires a sequence — sampling directly
            # from a set was deprecated in Python 3.9 and raises TypeError in
            # 3.11+ — so materialize the set into a list first.
            overlimit_block_keys = set(rand.sample(list(selected_keys), num_to_remove))
            selected_keys -= overlimit_block_keys

        # Do we have enough blocks now?
        num_to_add = max_count - len(selected_keys)

        added_block_keys = None
        if num_to_add > 0:
            # We need to select [more] blocks to display to this user:
            pool = valid_block_keys - selected_keys
            if mode == "random":
                num_to_add = min(len(pool), num_to_add)
                added_block_keys = set(rand.sample(list(pool), num_to_add))
                # We now have the correct n random children to show for this user.
            else:
                raise NotImplementedError("Unsupported mode.")
            selected_keys |= added_block_keys

        if any((invalid_block_keys, overlimit_block_keys, added_block_keys)):
            selected = list(selected_keys)
            # Shuffle with the same RNG instance used for sampling above
            # (previously this inconsistently used the module-level RNG).
            rand.shuffle(selected)

        return {
            'selected': selected,
            'invalid': invalid_block_keys,
            'overlimit': overlimit_block_keys,
            'added': added_block_keys,
        }

    def _publish_event(self, event_name, result, **kwargs):
        """
        Publish an ``edx.librarycontentblock.content.*`` analytics event.
        """
        previous_count = getattr(self, "_last_event_result_count", len(self.selected))
        payload = {
            "location": six.text_type(self.location),
            "result": result,
            "previous_count": previous_count,
            "max_count": self.max_count,
        }
        payload.update(kwargs)
        event_type = "edx.librarycontentblock.content.{}".format(event_name)
        self.runtime.publish(self, event_type, payload)
        # Remember the result size so the next event can report it as previous_count.
        self._last_event_result_count = len(result)  # pylint: disable=attribute-defined-outside-init

    @classmethod
    def publish_selected_children_events(cls, block_keys, format_block_keys, publish_event):
        """
        Helper method for publishing events when children blocks are
        selected/updated for a user.  This helper is also used by
        the ContentLibraryTransformer.

        Arguments:

            block_keys -
                A dict describing which events to publish (add or
                remove), see `make_selection` above for format details.

            format_block_keys -
                A function to convert block keys to the format expected
                by publish_event. Must have the signature:

                    [(block_type, block_id)] -> T

                Where T is a collection of block keys as accepted by
                `publish_event`.

            publish_event -
                Function that handles the actual publishing.  Must have
                the signature:

                    <'removed'|'assigned'> -> result:T -> removed:T -> reason:str -> None

                Where T is a collection of block_keys as returned by
                `format_block_keys`.
        """
        invalid = block_keys['invalid']
        if invalid:
            # reason "invalid" means deleted from library or a different library is now being used.
            publish_event(
                "removed",
                result=format_block_keys(block_keys['selected']),
                removed=format_block_keys(invalid),
                reason="invalid"
            )

        overlimit = block_keys['overlimit']
        if overlimit:
            publish_event(
                "removed",
                result=format_block_keys(block_keys['selected']),
                removed=format_block_keys(overlimit),
                reason="overlimit"
            )

        added = block_keys['added']
        if added:
            publish_event(
                "assigned",
                result=format_block_keys(block_keys['selected']),
                added=format_block_keys(added)
            )

    def selected_children(self):
        """
        Returns a list() of block_ids indicating which of the possible children
        have been selected to display to the current user.

        This reads and updates the "selected" field, which has user_state scope.

        Note: the return value (self.selected) contains block_ids. To get
        actual BlockUsageLocators, it is necessary to use self.children,
        because the block_ids alone do not specify the block type.
        """
        block_keys = self.make_selection(self.selected, self.children, self.max_count, "random")  # pylint: disable=no-member

        # Publish events for analytics purposes:
        lib_tools = self.runtime.service(self, 'library_tools')

        def format_block_keys(keys):
            """Summarize block keys for the analytics pipeline."""
            return lib_tools.create_block_analytics_summary(self.location.course_key, keys)

        self.publish_selected_children_events(
            block_keys,
            format_block_keys,
            self._publish_event,
        )

        selection_changed = any(block_keys[field] for field in ('invalid', 'overlimit', 'added'))
        if selection_changed:
            # Save our selections to the user state, to ensure consistency:
            self.selected = block_keys['selected']  # TODO: this doesn't save from the LMS "Progress" page.

        return self.selected

    def _get_selected_child_blocks(self):
        """
        Yield the XBlock instances of the children selected for the current user.
        """
        course_key = self.location.course_key
        for block_type, block_id in self.selected_children():
            usage_key = course_key.make_usage_key(block_type, block_id)
            yield self.runtime.get_block(usage_key)

    def student_view(self, context):  # lint-amnesty, pylint: disable=missing-function-docstring
        fragment = Fragment()
        contents = []
        child_context = copy(context) if context else {}

        for child in self._get_selected_child_blocks():
            if child is None:
                # TODO: Fix the underlying issue in TNL-7424
                # This shouldn't be happening, but does for an as-of-now
                # unknown reason. Until we address the underlying issue,
                # let's at least log the error explicitly, ignore the
                # exception, and prevent the page from resulting in a
                # 500-response.
                logger.error('Skipping display for child block that is None')
                continue
            for displayable in child.displayable_items():
                rendered_child = displayable.render(STUDENT_VIEW, child_context)
                fragment.add_fragment_resources(rendered_child)
                contents.append({
                    'id': text_type(displayable.location),
                    'content': rendered_child.content,
                })

        template_context = {
            'items': contents,
            'xblock_context': context,
            'show_bookmark_button': False,
            'watched_completable_blocks': set(),
            'completion_delay_ms': None,
        }
        fragment.add_content(self.system.render_template('vert_module.html', template_context))
        return fragment

    def author_view(self, context):
        """
        Renders the Studio views.
        Normal studio view: If block is properly configured, displays library status summary
        Studio container view: displays a preview of all possible children.
        """
        fragment = Fragment()
        root_xblock = context.get('root_xblock')
        is_root = root_xblock and root_xblock.location == self.location

        if is_root and self.children:  # pylint: disable=no-member
            # User has clicked the "View" link. Show a preview of all possible children:
            header_context = {
                'max_count': self.max_count,
                'display_name': self.display_name or self.url_name,
            }
            fragment.add_content(
                self.system.render_template("library-block-author-preview-header.html", header_context)
            )
            context['can_edit_visibility'] = False
            context['can_move'] = False
            self.render_children(context, fragment, can_reorder=False, can_add=False)
        # else: When shown on a unit page, don't show any sort of preview -
        # just the status of this block in the validation area.

        # The following JS is used to make the "Update now" button work on the unit page and the container view:
        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/library_content_edit.js'))
        fragment.initialize_js('LibraryContentAuthorView')
        return fragment

    def studio_view(self, _context):
        """
        Return the Studio edit view.
        """
        rendered_template = self.system.render_template(self.mako_template, self.get_context())
        fragment = Fragment(rendered_template)
        add_webpack_to_fragment(fragment, 'LibraryContentBlockStudio')
        shim_xmodule_js(fragment, self.studio_js_module_name)
        return fragment

    def get_child_descriptors(self):
        """
        Return only the subset of our children relevant to the current student.
        """
        selected_blocks = self._get_selected_child_blocks()
        return list(selected_blocks)

    @property
    def non_editable_metadata_fields(self):
        """Fields that must not appear in the Studio edit form."""
        hidden_fields = super().non_editable_metadata_fields
        # The only supported mode is currently 'random', and the library
        # version is maintained automatically, so hide both from the editor.
        hidden_fields.append(LibraryContentBlock.mode)
        hidden_fields.append(LibraryContentBlock.source_library_version)
        return hidden_fields

    @lazy
    def tools(self):
        """
        Grab the library tools service or raise an error.
        """
        return self.runtime.service(self, 'library_tools')

    def get_user_id(self):
        """
        Return the ID of the current user, or None if it cannot be determined.
        """
        user_service = self.runtime.service(self, 'user')
        if not user_service:
            # May be None when creating bok choy test fixtures
            return None
        current_user = user_service.get_current_user()
        return current_user.opt_attrs.get('edx-platform.user_id', None)

    @XBlock.handler
    def refresh_children(self, request=None, suffix=None):  # lint-amnesty, pylint: disable=unused-argument
        """
        Refresh children:
        This method is to be used when any of the libraries that this block
        references have been updated. It will re-fetch all matching blocks from
        the libraries, and copy them as children of this block. The children
        will be given new block_ids, but the definition ID used should be the
        exact same definition ID used in the library.

        This method will update this block's 'source_library_version' field to
        store the version number of the libraries used, so we easily determine
        if this block is up to date or not.
        """
        user_perms = self.runtime.service(self, 'studio_user_permissions')
        # Without the library_tools service we cannot fetch library content at all.
        if not self.tools:
            return Response("Library Tools unavailable in current runtime.", status=400)
        self.tools.update_children(self, user_perms)
        return Response()

    # Copy over any overridden settings the course author may have applied to the blocks.
    def _copy_overrides(self, store, user_id, source, dest):
        """
        Recursively copy settings-scope field overrides from `source` onto `dest`,
        then persist `dest` via `store`.
        """
        for field in six.itervalues(source.fields):
            if field.scope == Scope.settings and field.is_set_on(source):
                setattr(dest, field.name, field.read_from(source))
        if source.has_children:
            src_blocks = (self.runtime.get_block(key) for key in source.children)
            dst_blocks = (self.runtime.get_block(key) for key in dest.children)
            for src_child, dst_child in zip(src_blocks, dst_blocks):
                self._copy_overrides(store, user_id, src_child, dst_child)
        store.update_item(dest, user_id)

    def studio_post_duplicate(self, store, source_block):
        """
        Used by the studio after basic duplication of a source block. We handle the children
        ourselves, because we have to properly reference the library upstream and set the overrides.

        Otherwise we'll end up losing data on the next refresh.
        """
        user_id = self.get_user_id()
        user_perms = self.runtime.service(self, 'studio_user_permissions')
        if not self.tools:
            raise RuntimeError("Library tools unavailable, duplication will not be sane!")

        # First, refresh our copy of the library to generate the children.
        # This must happen at the currently set version of the library block;
        # otherwise we may not get exactly the same children — someone may be
        # duplicating an out-of-date block, after all.
        self.tools.update_children(self, user_perms, version=self.source_library_version)

        # Then re-apply the author's per-block setting overrides.
        self._copy_overrides(store, user_id, source_block, self)

        # Children have been handled.
        return True

    def _validate_library_version(self, validation, lib_tools, version, library_key):
        """
        Add a validation message when `version` is stale or the library is
        unavailable.  Returns False if a problem was recorded, True otherwise.
        """
        latest_version = lib_tools.get_library_version(library_key)

        if latest_version is None:
            # The library could not be loaded at all.
            validation.set_summary(
                StudioValidationMessage(
                    StudioValidationMessage.ERROR,
                    _(u'Library is invalid, corrupt, or has been deleted.'),
                    action_class='edit-button',
                    action_label=_(u"Edit Library List.")
                )
            )
            return False

        if version is None or version != six.text_type(latest_version):
            # Our cached copy of the library content is behind the latest version.
            validation.set_summary(
                StudioValidationMessage(
                    StudioValidationMessage.WARNING,
                    _(u'This component is out of date. The library has new content.'),
                    # TODO: change this to action_runtime_event='...' once the unit page supports that feature.
                    # See https://openedx.atlassian.net/browse/TNL-993
                    action_class='library-update-btn',
                    # Translators: {refresh_icon} placeholder is substituted to "↻" (without double quotes)
                    action_label=_(u"{refresh_icon} Update now.").format(refresh_icon=u"↻")
                )
            )
            return False

        return True

    def _set_validation_error_if_empty(self, validation, summary):
        """  Helper method to only set validation summary if it's empty """
        if not validation.empty:
            return
        validation.set_summary(summary)

    def validate(self):
        """
        Validates the state of this Library Content Module Instance. This
        is the override of the general XBlock method, and it will also ask
        its superclass to validate.
        """
        validation = super().validate()
        if not isinstance(validation, StudioValidation):
            validation = StudioValidation.copy(validation)

        # Without library support in this course we cannot validate anything else.
        library_tools = self.runtime.service(self, "library_tools")
        if not (library_tools and library_tools.can_use_library_content(self)):
            validation.set_summary(
                StudioValidationMessage(
                    StudioValidationMessage.ERROR,
                    _(
                        u"This course does not support content libraries. "
                        u"Contact your system administrator for more information."
                    )
                )
            )
            return validation

        if not self.source_library_id:
            validation.set_summary(
                StudioValidationMessage(
                    StudioValidationMessage.NOT_CONFIGURED,
                    _(u"A library has not yet been selected."),
                    action_class='edit-button',
                    action_label=_(u"Select a Library.")
                )
            )
            return validation

        lib_tools = self.runtime.service(self, 'library_tools')
        self._validate_library_version(validation, lib_tools, self.source_library_version, self.source_library_key)

        # Note: we assume refresh_children() has been called
        # since the last time fields like source_library_id or capa_types were changed.
        matching_children_count = len(self.children)  # pylint: disable=no-member

        if matching_children_count == 0:
            self._set_validation_error_if_empty(
                validation,
                StudioValidationMessage(
                    StudioValidationMessage.WARNING,
                    _(u'There are no matching problem types in the specified libraries.'),
                    action_class='edit-button',
                    action_label=_(u"Select another problem type.")
                )
            )

        if matching_children_count < self.max_count:
            fetch_text = ngettext(
                u'The specified library is configured to fetch {count} problem, ',
                u'The specified library is configured to fetch {count} problems, ',
                self.max_count
            )
            actual_text = ngettext(
                u'but there is only {actual} matching problem.',
                u'but there are only {actual} matching problems.',
                matching_children_count
            )
            message_text = (fetch_text + actual_text).format(
                count=self.max_count, actual=matching_children_count
            )
            self._set_validation_error_if_empty(
                validation,
                StudioValidationMessage(
                    StudioValidationMessage.WARNING,
                    message_text,
                    action_class='edit-button',
                    action_label=_(u"Edit the library configuration.")
                )
            )

        return validation

    def source_library_values(self):
        """
        Return a list of possible values for self.source_library_id
        """
        lib_tools = self.runtime.service(self, 'library_tools')
        user_perms = self.runtime.service(self, 'studio_user_permissions')
        current_id = self.source_library_id
        all_libraries = [
            (key, bleach.clean(name))
            for key, name in lib_tools.list_available_libraries()
            if user_perms.can_read(key) or current_id == six.text_type(key)
        ]
        all_libraries.sort(key=lambda entry: entry[1])  # Sort by name
        # Keep the currently-selected library listed even if it is unreadable/gone.
        if current_id and self.source_library_key not in [entry[0] for entry in all_libraries]:
            all_libraries.append((current_id, _(u"Invalid Library")))
        all_libraries.insert(0, (u"", _("No Library Selected")))
        return [
            {"display_name": name, "value": six.text_type(key)}
            for key, name in all_libraries
        ]

    def editor_saved(self, user, old_metadata, old_content):  # lint-amnesty, pylint: disable=unused-argument
        """
        If source_library_id or capa_type has been edited, refresh_children automatically.
        """
        library_changed = old_metadata.get('source_library_id', []) != self.source_library_id
        capa_type_changed = old_metadata.get('capa_type', ANY_CAPA_TYPE_VALUE) != self.capa_type
        if library_changed or capa_type_changed:
            try:
                self.refresh_children()
            except ValueError:
                pass  # The validation area will display an error message, no need to do anything now.

    def has_dynamic_children(self):
        """
        Inform the runtime that our children vary per-user.
        See get_child_descriptors() above
        """
        return True

    def get_content_titles(self):
        """
        Returns a list of friendly titles for our selected children only;
        without this, all possible children's titles would be seen in the
        sequence bar in the LMS.

        This overwrites the get_content_titles method included in x_module by default.
        """
        titles = []
        for child in self.get_child_descriptors():
            titles += child.get_content_titles()
        return titles

    @classmethod
    def definition_from_xml(cls, xml_object, system):
        """Parse a <library_content> node into a (definition, children) pair."""
        children = []
        for child_node in xml_object.getchildren():
            parsed = system.process_xml(etree.tostring(child_node))
            children.append(parsed.scope_ids.usage_id)
        definition = {}
        for attr_name, attr_value in xml_object.attrib.items():
            definition[attr_name] = json.loads(attr_value)
        return definition, children

    def definition_to_xml(self, resource_fs):
        """ Exports Library Content Module to XML """
        xml_object = etree.Element('library_content')
        for child in self.get_children():
            self.runtime.add_block_as_child_node(child, xml_object)
        # Set node attributes based on our explicitly-set fields.
        excluded = ('children', 'parent', 'content')
        for field_name, field in six.iteritems(self.fields):
            if field_name in excluded or not field.is_set_on(self):
                continue
            xml_object.set(field_name, six.text_type(field.read_from(self)))
        return xml_object
Пример #8
0
class TipBlock(StudioEditableXBlockMixin, XBlockWithTranslationServiceMixin,
               XBlock, ExpandStaticURLMixin):
    """
    A tip attached to one or more choices of the parent question block.

    Each choice can define a tip depending on selection; this block stores the
    tip's text plus which choice values it applies to, and optional popup size.
    """
    # Body of the tip popup (may contain HTML — see parse_xml below).
    content = String(
        display_name=_("Content"),
        help=
        _("Text of the tip to show if the student chooses this tip's associated choice[s]"
          ),
        scope=Scope.content,
        default="")
    # Choice values this tip is shown for; the Studio editor offers the
    # parent block's human-readable choices as options.
    values = List(
        display_name=_("For Choices"),
        help=_("List of choices for which to display this tip"),
        scope=Scope.content,
        default=[],
        list_values_provider=lambda self: self.get_parent().
        human_readable_choices,
        list_style='set',  # Unordered, unique items. Affects the UI editor.
    )
    # Optional CSS sizes for the popup; empty string means "use the default".
    width = String(display_name=_("Width"),
                   help=_("Width of the tip popup (e.g. '400px')"),
                   scope=Scope.content,
                   default='')
    height = String(display_name=_("Height"),
                    help=_("Height of the tip popup (e.g. '200px')"),
                    scope=Scope.content,
                    default='')
    editable_fields = ('values', 'content', 'width', 'height')

    @property
    def display_name_with_default(self):
        # Builds a label such as "Tip for A, B" out of the tag-stripped,
        # truncated display names of the choices this tip applies to.
        values_list = []
        for entry in self.get_parent().human_readable_choices:
            if entry["value"] in self.values:
                display_name = strip_tags(
                    entry["display_name"]
                )  # Studio studio_view can't handle html in display_name
                if len(display_name) > 20:
                    display_name = display_name[:20] + u'…'
                values_list.append(display_name)
        return self._(u"Tip for {list_of_choices}").format(
            list_of_choices=u", ".join(values_list))

    def mentoring_view(self, context=None):
        """ Render this XBlock within a mentoring block. """
        html = loader.render_template("templates/html/tip.html", {
            'content': self.content,
            'width': self.width,
            'height': self.height,
        })
        return Fragment(html)

    def student_view_data(self, context=None):
        # Plain-data representation used by non-HTML clients (e.g. mobile).
        return {
            'display_name': self.display_name_with_default,
            'content': self.expand_static_url(self.content),
            'for_choices': self.values,
        }

    def student_view(self, context=None):
        """ Normal view of this XBlock, identical to mentoring_view """
        return self.mentoring_view(context)

    def clean_studio_edits(self, data):
        """
        Clean up the edits during studio_view save
        """
        # Deduplicate the selected choice values and coerce each to text.
        if "values" in data:
            data["values"] = list(
                [six.text_type(v) for v in set(data["values"])])

    def validate_field_data(self, validation, data):
        """
        Validate this block's field data.
        """
        super(TipBlock, self).validate_field_data(validation, data)

        def add_error(msg):
            validation.add(ValidationMessage(ValidationMessage.ERROR, msg))

        # Broad catch: presumably the parent block can be unavailable here
        # (e.g. during import), in which case validation is skipped — verify.
        try:
            valid_values = set(self.get_parent().all_choice_values)
        except Exception:
            pass
        else:
            for dummy in set(data.values) - valid_values:
                add_error(
                    self._(u"A choice selected for this tip does not exist."))

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        """
        Construct this XBlock from the given XML node.
        """
        block = runtime.construct_xblock_from_class(cls, keys)

        # 'values' is serialized via the List field; sizes are plain strings.
        block.values = cls.values.from_string(node.get('values', '[]'))
        block.width = node.get('width', '')
        block.height = node.get('height', '')

        # The tip body is free-form markup: keep the node's text plus the
        # serialized form of every child element.
        block.content = six.text_type(node.text or u"")
        for child in node:
            block.content += etree.tostring(child, encoding='unicode')

        return block
# Example #9 (score: 0)
class LibraryRoot(XBlock):
    """
    The LibraryRoot is the root XBlock of a content library. All other blocks in
    the library are its children. It contains metadata such as the library's
    display_name.
    """
    display_name = String(
        help=_("Enter the name of the library as it should appear in Studio."),
        default="Library",
        display_name=_("Library Display Name"),
        scope=Scope.settings)
    advanced_modules = List(
        display_name=_("Advanced Module List"),
        help=_(
            "Enter the names of the advanced components to use in your library."
        ),
        scope=Scope.settings,
        xml_node=True,
    )
    # Per-user toggle for showing child previews in Studio.
    # NOTE(review): the display_name says "Hide" but the field stores
    # "show" semantics (default True = previews shown) — confirm intent.
    show_children_previews = Boolean(
        display_name="Hide children preview",
        help="Choose if preview of library contents is shown",
        scope=Scope.user_state,
        default=True)
    has_children = True
    has_author_view = True

    def __unicode__(self):
        # Python 2 text representation.
        return u"Library: {}".format(self.display_name)

    def __str__(self):
        # __str__ must return bytes on Python 2 but text on Python 3; the
        # previous implementation called the Py2-only builtin `unicode`,
        # which raises NameError on Python 3.
        text = self.__unicode__()
        return text.encode('utf-8') if six.PY2 else text

    def author_view(self, context):
        """
        Renders the Studio preview view.
        """
        fragment = Fragment()
        self.render_children(context,
                             fragment,
                             can_reorder=False,
                             can_add=True)
        return fragment

    def render_children(self,
                        context,
                        fragment,
                        can_reorder=False,
                        can_add=False):  # pylint: disable=unused-argument
        """
        Renders the children of the module with HTML appropriate for Studio. Reordering is not supported.
        """
        contents = []

        paging = context.get('paging', None)

        children_count = len(self.children)  # pylint: disable=no-member
        item_start, item_end = 0, children_count

        # TODO sort children
        if paging:
            # Slice the children down to the requested page; a missing
            # page_size means "everything on one page".
            page_number = paging.get('page_number', 0)
            raw_page_size = paging.get('page_size', None)
            page_size = raw_page_size if raw_page_size is not None else children_count
            item_start, item_end = page_size * page_number, page_size * (
                page_number + 1)

        children_to_show = self.children[item_start:item_end]  # pylint: disable=no-member

        force_render = context.get('force_render', None)

        for child_key in children_to_show:  # pylint: disable=E1101
            # Children must have a separate context from the library itself. Make a copy.
            child_context = context.copy()
            child_context['show_preview'] = self.show_children_previews
            child_context['can_edit_visibility'] = False
            child = self.runtime.get_block(child_key)
            child_view_name = StudioEditableModule.get_preview_view_name(child)

            # A single child can be force-rendered (by location) even when
            # previews are globally off.
            if six.text_type(child.location) == force_render:
                child_context['show_preview'] = True

            if child_context['show_preview']:
                rendered_child = self.runtime.render_child(
                    child, child_view_name, child_context)
            else:
                rendered_child = self.runtime.render_child_placeholder(
                    child, child_view_name, child_context)
            fragment.add_frag_resources(rendered_child)

            contents.append({
                'id': six.text_type(child.location),
                'content': rendered_child.content,
            })

        fragment.add_content(
            self.runtime.render_template(
                "studio_render_paged_children_view.html", {
                    'items': contents,
                    'xblock_context': context,
                    'can_add': can_add,
                    'first_displayed': item_start,
                    'total_children': children_count,
                    'displayed_children': len(children_to_show),
                    'previews': self.show_children_previews
                }))

    @property
    def display_org_with_default(self):
        """
        Org display names are not implemented. This just provides API compatibility with CourseDescriptor.
        Always returns the raw 'org' field from the key.
        """
        return self.scope_ids.usage_id.course_key.org

    @property
    def display_number_with_default(self):
        """
        Display numbers are not implemented. This just provides API compatibility with CourseDescriptor.
        Always returns the raw 'library' field from the key.
        """
        return self.scope_ids.usage_id.course_key.library

    @XBlock.json_handler
    def trigger_previews(self, request_body, suffix):  # pylint: disable=unused-argument
        """ Enable or disable previews in studio for library children. """
        self.show_children_previews = request_body.get(
            'showChildrenPreviews', self.show_children_previews)
        return {'showChildrenPreviews': self.show_children_previews}
# Example #10 (score: 0)
class DashboardBlock(StudioEditableXBlockMixin, ExportMixin, XBlock):
    """
    A block to summarize self-assessment results.

    Aggregates the MCQ answers of a configured, ordered list of mentoring
    blocks into per-block tables (with optional color coding, averages and a
    visual representation) rendered for the current student.
    """
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        scope=Scope.settings,
        default=_('Self-Assessment Summary'),
    )
    # Ordered url_names of the mentoring blocks to summarize.
    mentoring_ids = List(
        display_name=_("Mentoring Blocks"),
        help=
        _("This should be an ordered list of the url_names of each mentoring block whose multiple choice question "
          "values are to be shown on this dashboard. The list should be in JSON format. Example: {example_here}"
          ).
        format(
            example_here=
            '["2754b8afc03a439693b9887b6f1d9e36", "215028f7df3d4c68b14fb5fea4da7053"]'
        ),
        scope=Scope.settings,
    )
    # Map of mentoring url_name -> list of 1-based question numbers to hide.
    exclude_questions = Dict(
        display_name=_("Questions to be hidden"),
        help=
        _("Optional rules to exclude specific questions both from displaying in dashboard and from the calculated "
          "average. Rules must start with the url_name of a mentoring block, followed by list of question numbers "
          "to exclude. Rule set must be in JSON format. Question numbers are one-based (the first question being "
          "number 1). Must be in JSON format. Examples: {examples_here}").
        format(
            examples_here=
            '{"2754b8afc03a439693b9887b6f1d9e36":[1,2], "215028f7df3d4c68b14fb5fea4da7053":[1,5]}'
        ),
        scope=Scope.content,
        multiline_editor=True,
        resettable_editor=False,
    )
    # One color rule per line; parsed by parse_color_rules_str().
    color_rules = String(
        display_name=_("Color Coding Rules"),
        help=
        _("Optional rules to assign colors to possible answer values and average values. "
          "One rule per line. First matching rule will be used. Light colors are recommended. "
          "Examples: {examples_here}").format(
              examples_here=
              '"1: LightCoral", "0 <= x < 5: LightBlue", "LightGreen"'),
        scope=Scope.content,
        default="",
        multiline_editor=True,
        resettable_editor=False,
    )
    # JSON configuration consumed by DashboardVisualData.
    visual_rules = String(
        display_name=_("Visual Representation"),
        default="",
        help=
        _("Optional: Enter the JSON configuration of the visual representation desired (Advanced)."
          ),
        scope=Scope.content,
        multiline_editor=True,
        resettable_editor=False,
    )
    visual_title = String(
        display_name=_("Visual Representation Title"),
        default=_("Visual Representation"),
        help=
        _("This text is not displayed visually but is exposed to screen reader users who may not see the image."
          ),
        scope=Scope.content,
    )
    visual_desc = String(
        display_name=_("Visual Repr. Description"),
        default=
        _("The data represented in this image is available in the tables below."
          ),
        help=_(
            "This longer description is not displayed visually but is exposed to screen reader "
            "users who may not see the image."),
        scope=Scope.content,
    )
    # Map of mentoring url_name -> label to use instead of "Average".
    average_labels = Dict(
        display_name=_("Label for average value"),
        help=_(
            "This settings allows overriding label for the calculated average per mentoring block. Must be in JSON "
            "format. Examples: {examples_here}.").
        format(
            examples_here=
            '{"2754b8afc03a439693b9887b6f1d9e36": "Avg.", "215028f7df3d4c68b14fb5fea4da7053": "Mean"}'
        ),
        scope=Scope.content,
    )
    show_numbers = Boolean(display_name=_("Display values"),
                           default=True,
                           help=_("Toggles if numeric values are displayed"),
                           scope=Scope.content)
    header_html = String(
        display_name=_("Header HTML"),
        default="",
        help=_("Custom text to include at the beginning of the report."),
        multiline_editor="html",
        resettable_editor=False,
        scope=Scope.content,
    )
    footer_html = String(
        display_name=_("Footer HTML"),
        default="",
        help=_("Custom text to include at the end of the report."),
        multiline_editor="html",
        resettable_editor=False,
        scope=Scope.content,
    )

    editable_fields = (
        'display_name',
        'mentoring_ids',
        'exclude_questions',
        'average_labels',
        'show_numbers',
        'color_rules',
        'visual_rules',
        'visual_title',
        'visual_desc',
        'header_html',
        'footer_html',
    )
    css_path = 'public/css/dashboard.css'
    js_path = 'public/js/review_blocks.js'

    def get_mentoring_blocks(self, mentoring_ids, ignore_errors=True):
        """
        Generator returning the specified mentoring blocks, in order.

        Will yield None for every invalid mentoring block ID, or if
        ignore_errors is False, will raise InvalidUrlName.
        """
        for url_name in mentoring_ids:
            try:
                mentoring_id = self.scope_ids.usage_id.course_key.make_usage_key(
                    'problem-builder', url_name)
                yield self.runtime.get_block(mentoring_id)
            except Exception:  # Catch-all b/c we could get XBlockNotFoundError, ItemNotFoundError, InvalidKeyError, ...
                # Maybe it's using the deprecated block type "mentoring":
                try:
                    mentoring_id = self.scope_ids.usage_id.course_key.make_usage_key(
                        'mentoring', url_name)
                    yield self.runtime.get_block(mentoring_id)
                except Exception:
                    if ignore_errors:
                        yield None
                    else:
                        raise InvalidUrlName(url_name)

    def parse_color_rules_str(self, color_rules_str, ignore_errors=True):
        """
        Parse the color rules. Returns a list of ColorRule objects.

        Color rules are like: "0 < x < 4: red" or "blue" (for a catch-all rule)
        """
        rules = []
        for lineno, line in enumerate(color_rules_str.splitlines()):
            line = line.strip()
            if line:
                try:
                    if ":" in line:
                        condition, value = line.split(':')
                        value = value.strip()
                        if condition.isnumeric(
                        ):  # A condition just listed as an exact value
                            condition = "x == " + condition
                    else:
                        condition = "1"  # Always true
                        value = line
                    rules.append(ColorRule(condition, value))
                except ValueError:
                    if ignore_errors:
                        continue
                    raise ValueError(
                        _("Invalid color rule on line {line_number}").format(
                            line_number=lineno + 1))
        return rules

    @lazy
    def color_rules_parsed(self):
        """
        Caching property to get parsed color rules. Returns a list of ColorRule objects.
        """
        return self.parse_color_rules_str(
            self.color_rules) if self.color_rules else []

    def _get_submission_key(self, usage_key):
        """
        Given the usage_key of an MCQ block, get the dict key needed to look it up with the
        submissions API.
        """
        # six.text_type instead of the Py2-only `unicode` builtin.
        return dict(
            student_id=self.runtime.anonymous_student_id,
            course_id=six.text_type(usage_key.course_key),
            item_id=six.text_type(usage_key),
            item_type=usage_key.block_type,
        )

    def color_for_value(self, value):
        """ Given a string value, get the color rule that matches, if any """
        # Py2/Py3: six.string_types instead of the Py2-only `basestring`.
        if isinstance(value, six.string_types):
            if value.isnumeric():
                value = float(value)
            else:
                return None
        for rule in self.color_rules_parsed:
            if rule.matches(value):
                return rule.color_str
        return None

    def _get_problem_questions(self, mentoring_block):
        """ Generator returning only children of specified block that are MCQs """
        for child_id in mentoring_block.children:
            if child_isinstance(mentoring_block, child_id, MCQBlock):
                yield child_id

    @XBlock.supports("multi_device")  # Mark as mobile-friendly
    def student_view(self, context=None):  # pylint: disable=unused-argument
        """
        Standard view of this XBlock.
        """
        if not self.mentoring_ids:
            return Fragment(u"<h1>{}</h1><p>{}</p>".format(
                self.display_name, _("Not configured.")))

        blocks = []
        for mentoring_block in self.get_mentoring_blocks(self.mentoring_ids):
            if mentoring_block is None:
                continue
            block = {'display_name': mentoring_block.display_name, 'mcqs': []}
            try:
                hide_questions = self.exclude_questions.get(
                    mentoring_block.url_name, [])
            except Exception:  # pylint: disable=broad-except
                log.exception(
                    "Cannot parse exclude_questions setting - probably malformed: %s",
                    self.exclude_questions)
                hide_questions = []

            for question_number, child_id in enumerate(
                    self._get_problem_questions(mentoring_block), 1):
                try:
                    if question_number in hide_questions:
                        continue
                except TypeError:
                    log.exception(
                        "Cannot check question number - expected list of ints got: %s",
                        hide_questions)

                # Get the student's submitted answer to this MCQ from the submissions API:
                mcq_block = self.runtime.get_block(child_id)
                mcq_submission_key = self._get_submission_key(child_id)
                try:
                    value = sub_api.get_submissions(mcq_submission_key,
                                                    limit=1)[0]["answer"]
                except IndexError:
                    value = None

                block['mcqs'].append({
                    "display_name":
                    mcq_block.display_name_with_default,
                    "value":
                    value,
                    "accessible_value":
                    _("Score: {score}").format(
                        score=value) if value else _("No value yet"),
                    "color":
                    self.color_for_value(value) if value is not None else None,
                })
            # If the values are numeric, display an average:
            # (assumes submitted answers are text — isnumeric() — verify)
            numeric_values = [
                float(mcq['value']) for mcq in block['mcqs']
                if mcq['value'] is not None and mcq['value'].isnumeric()
            ]
            if numeric_values:
                average_value = sum(numeric_values) / len(numeric_values)
                block['average'] = average_value
                # average block is shown only if average value exists, so accessible text for no data is not required
                block['accessible_average'] = _("Score: {score}").format(
                    score=floatformat(average_value))
                block['average_label'] = self.average_labels.get(
                    mentoring_block.url_name, _("Average"))
                block['has_average'] = True
                block['average_color'] = self.color_for_value(average_value)
            blocks.append(block)

        visual_repr = None
        if self.visual_rules:
            try:
                rules_parsed = json.loads(self.visual_rules)
            except ValueError:
                pass  # JSON errors should be shown as part of validation
            else:
                visual_repr = DashboardVisualData(blocks, rules_parsed,
                                                  self.color_for_value,
                                                  self.visual_title,
                                                  self.visual_desc)

        report_template = loader.render_template(
            'templates/html/dashboard_report.html', {
                'title': self.display_name,
                'css': loader.load_unicode(self.css_path),
                'student_name': self._get_user_full_name(),
                'course_name': self._get_course_name(),
            })

        html = loader.render_template(
            'templates/html/dashboard.html', {
                'blocks': blocks,
                'display_name': self.display_name,
                'visual_repr': visual_repr,
                'show_numbers': self.show_numbers,
                'header_html': self.header_html,
                'footer_html': self.footer_html,
            })

        fragment = Fragment(html)
        fragment.add_css_url(
            self.runtime.local_resource_url(self, self.css_path))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, self.js_path))
        fragment.initialize_js(
            'PBDashboardBlock', {
                'reportTemplate': report_template,
                'reportContentSelector': '.dashboard-report'
            })
        return fragment

    def validate_field_data(self, validation, data):
        """
        Validate this block's field data.
        """
        super(DashboardBlock, self).validate_field_data(validation, data)

        def add_error(msg):
            validation.add(ValidationMessage(ValidationMessage.ERROR, msg))

        try:
            list(
                self.get_mentoring_blocks(data.mentoring_ids,
                                          ignore_errors=False))
        except InvalidUrlName as e:
            add_error(
                _(u'Invalid block url_name given: "{bad_url_name}"').format(
                    bad_url_name=six.text_type(e)))

        if data.exclude_questions:
            # six.iteritems instead of the Py2-only dict.iteritems().
            for key, value in six.iteritems(data.exclude_questions):
                if not isinstance(value, list):
                    add_error(
                        _(u"'Questions to be hidden' is malformed: value for key {key} is {value}, "
                          u"expected list of integers").format(key=key,
                                                               value=value))

                if key not in data.mentoring_ids:
                    add_error(
                        _(u"'Questions to be hidden' is malformed: mentoring url_name {url_name} "
                          u"is not added to Dashboard").format(url_name=key))

        if data.average_labels:
            for key, value in six.iteritems(data.average_labels):
                if not isinstance(value, six.string_types):
                    add_error(
                        _(u"'Label for average value' is malformed: value for key {key} is {value}, expected string"
                          ).format(key=key, value=value))

                if key not in data.mentoring_ids:
                    add_error(
                        _(u"'Label for average value' is malformed: mentoring url_name {url_name} "
                          u"is not added to Dashboard").format(url_name=key))

        if data.color_rules:
            try:
                self.parse_color_rules_str(data.color_rules,
                                           ignore_errors=False)
            except ValueError as e:
                add_error(six.text_type(e))

        if data.visual_rules:
            try:
                rules = json.loads(data.visual_rules)
            except ValueError as e:
                add_error(
                    _(u"Visual rules contains an error: {error}").format(
                        error=e))
            else:
                if not isinstance(rules, dict):
                    add_error(
                        _(u"Visual rules should be a JSON dictionary/object: {...}"
                          ))
# Example #11 (score: 0)
class LeafWithDictAndList(XBlock):
    """A leaf block exposing one dict-typed and one list-typed field."""
    # Per-user dict field; the default flags the untouched state.
    dictionary = Dict(scope=Scope.user_state, default={"default": True})
    # Per-user ordered list field with a simple numeric default.
    sequence = List(scope=Scope.user_state, default=[1, 2, 3])
# Example #12 (score: 0)
class MentoringBlock(XBlockWithLightChildren, StepParentMixin):
    """
    An XBlock providing mentoring capabilities

    Composed of text, answers input fields, and a set of MRQ/MCQ with advices.
    A set of conditions on the provided answers and MCQ/MRQ choices will determine if the
    student is a) provided mentoring advices and asked to alter his answer, or b) is given the
    ok to continue.
    """

    @staticmethod
    def is_default_xml_content(value):
        """Return whether `value` is the unmodified default XML content."""
        return _is_default_xml_content(value)

    attempted = Boolean(help="Has the student attempted this mentoring step?",
                        default=False, scope=Scope.user_state)
    completed = Boolean(help="Has the student completed this mentoring step?",
                        default=False, scope=Scope.user_state)
    next_step = String(help="url_name of the next step the student must complete (global to all blocks)",
                       default='mentoring_first', scope=Scope.preferences)
    followed_by = String(help="url_name of the step after the current mentoring block in workflow",
                         default=None, scope=Scope.content)
    url_name = String(help="Name of the current step, used for URL building",
                      default='mentoring-default', scope=Scope.content)
    enforce_dependency = Boolean(help="Should the next step be the current block to complete?",
                                 default=False, scope=Scope.content, enforce_type=True)
    display_submit = Boolean(help="Allow submission of the current block?", default=True,
                             scope=Scope.content, enforce_type=True)
    xml_content = String(help="XML content", default=DEFAULT_XML_CONTENT, scope=Scope.content)
    weight = Float(help="Defines the maximum total grade of the block.",
                   default=1, scope=Scope.content, enforce_type=True)
    num_attempts = Integer(help="Number of attempts a user has answered for this questions",
                           default=0, scope=Scope.user_state, enforce_type=True)
    max_attempts = Integer(help="Number of max attempts for this questions", default=0,
                           scope=Scope.content, enforce_type=True)
    mode = String(help="Mode of the mentoring. 'standard' or 'assessment'",
                  default='standard', scope=Scope.content)
    step = Integer(help="Keep track of the student assessment progress.",
                   default=0, scope=Scope.user_state, enforce_type=True)
    student_results = List(help="Store results of student choices.", default=[],
                           scope=Scope.user_state)
    # FIX: the keyword was previously misspelled as `Scope=Scope.content`
    # (capital S), so it was silently absorbed by **kwargs and the scope was
    # never set explicitly (the field only worked because the default field
    # scope happens to be Scope.content).
    extended_feedback = Boolean(help="Show extended feedback details when all attempts are used up.",
                                default=False, scope=Scope.content)
    display_name = String(help="Display name of the component", default="Mentoring XBlock",
                          scope=Scope.settings)
    icon_class = 'problem'
    has_score = True

    MENTORING_MODES = ('standard', 'assessment')

    # Child block types that are not questions/steps and are rendered separately.
    FLOATING_BLOCKS = (TitleBlock, MentoringMessageBlock, SharedHeaderBlock)

    FIELDS_TO_INIT = ('xml_content',)

    @property
    def is_assessment(self):
        """True when this block runs in 'assessment' mode."""
        return self.mode == 'assessment'

    def get_question_number(self, question_id):
        """
        Get the step number of the question id
        """
        for question in self.get_children_objects():
            if hasattr(question, 'step_number') and (question.name == question_id):
                return question.step_number
        raise ValueError("Question ID in answer set not a step of this Mentoring Block!")

    def answer_mapper(self, answer_status):
        """
        Create a JSON-dumpable object with readable key names from a list of student answers.
        """
        answer_map = []
        for answer in self.student_results:
            if answer[1]['status'] == answer_status:
                try:
                    answer_map.append({
                        'number': self.get_question_number(answer[0]),
                        'id': answer[0],
                        'details': answer[1],
                    })
                except ValueError:
                    # Answer refers to a question that is no longer a step; skip it.
                    pass
        return answer_map

    @property
    def score(self):
        """Compute the student score taking into account the light child weight."""
        total_child_weight = sum(float(step.weight) for step in self.steps)
        if total_child_weight == 0:
            return Score(0, 0, [], [], [])
        steps_map = {q.name: q for q in self.steps}
        points_earned = 0
        for q_name, q_details in self.student_results:
            question = steps_map.get(q_name)
            if question:
                points_earned += q_details['score'] * question.weight
        score = points_earned / total_child_weight
        correct = self.answer_mapper(CORRECT)
        incorrect = self.answer_mapper(INCORRECT)
        partially_correct = self.answer_mapper(PARTIAL)

        return Score(score, int(round(score * 100)), correct, incorrect, partially_correct)

    @property
    def assessment_message(self):
        """HTML of the 'on-assessment-review' message, or None once attempts run out."""
        if not self.max_attempts_reached:
            return self.get_message_html('on-assessment-review')
        else:
            return None

    def show_extended_feedback(self):
        """Extended feedback is only shown when enabled AND attempts are exhausted."""
        return self.extended_feedback and self.max_attempts_reached

    def feedback_dispatch(self, target_data, stringify):
        """Return `target_data` (optionally JSON-encoded) when extended feedback applies."""
        if self.show_extended_feedback():
            if stringify:
                return json.dumps(target_data)
            else:
                return target_data

    def correct_json(self, stringify=True):
        """Extended-feedback payload for correct answers."""
        return self.feedback_dispatch(self.score.correct, stringify)

    def incorrect_json(self, stringify=True):
        """Extended-feedback payload for incorrect answers."""
        return self.feedback_dispatch(self.score.incorrect, stringify)

    def partial_json(self, stringify=True):
        """Extended-feedback payload for partially correct answers."""
        return self.feedback_dispatch(self.score.partially_correct, stringify)

    def student_view(self, context):
        """Render the main student-facing view of the mentoring block."""
        # Migrate stored data if necessary
        self.migrate_fields()

        # Validate self.step:
        num_steps = len([child for child in self.get_children_objects() if not isinstance(child, self.FLOATING_BLOCKS)])
        if self.step > num_steps:
            self.step = num_steps

        fragment, named_children = self.get_children_fragment(
            context, view_name='mentoring_view',
            not_instance_of=self.FLOATING_BLOCKS,
        )

        fragment.add_content(loader.render_template('templates/html/mentoring.html', {
            'self': self,
            'named_children': named_children,
            'missing_dependency_url': self.has_missing_dependency and self.next_step_url,
        }))
        fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/mentoring.css'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/vendor/underscore-min.js'))

        # Assessment mode gets its own client-side controller.
        js_view = 'mentoring_assessment_view.js' if self.is_assessment else 'mentoring_standard_view.js'
        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/'+js_view))

        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/mentoring.js'))
        fragment.add_resource(loader.load_unicode('templates/html/mentoring_attempts.html'), "text/html")
        fragment.add_resource(loader.load_unicode('templates/html/mentoring_grade.html'), "text/html")
        fragment.add_resource(loader.load_unicode('templates/html/mentoring_review_questions.html'), "text/html")

        fragment.initialize_js('MentoringBlock')

        # Blocks without a submit button report progress immediately on render.
        if not self.display_submit:
            self.runtime.publish(self, 'progress', {})

        return fragment

    def migrate_fields(self):
        """
        Migrate data stored in the fields, when a format change breaks backward-compatibility with
        previous data formats
        """
        # Partial answers replaced the `completed` with `status` in `self.student_results`
        if self.student_results and 'completed' in self.student_results[0][1]:
            # Rename the field and use the new value format (text instead of boolean)
            for result in self.student_results:
                result[1]['status'] = CORRECT if result[1]['completed'] else INCORRECT
                del result[1]['completed']

    @property
    def additional_publish_event_data(self):
        """Extra fields merged into every published analytics event."""
        return {
            'user_id': self.scope_ids.user_id,
            'component_id': self.url_name,
        }

    @property
    def title(self):
        """
        Returns the title child.
        """
        for child in self.get_children_objects():
            if isinstance(child, TitleBlock):
                return child
        return None

    @property
    def header(self):
        """
        Return the header child.
        """
        for child in self.get_children_objects():
            if isinstance(child, SharedHeaderBlock):
                return child
        return None

    @property
    def has_missing_dependency(self):
        """
        Returns True if the student needs to complete another step before being able to complete
        the current one, and False otherwise
        """
        return self.enforce_dependency and (not self.completed) and (self.next_step != self.url_name)

    @property
    def next_step_url(self):
        """
        Returns the URL of the next step's page
        """
        return '/jump_to_id/{}'.format(self.next_step)

    @XBlock.json_handler
    def get_results(self, queries, suffix=''):
        """
        Gets detailed results in the case of extended feedback.

        It may be a good idea to eventually have this function get results
        in the general case instead of loading them in the template in the future,
        and only using it for extended feedback situations.

        Right now there are two ways to get results-- through the template upon loading up
        the mentoring block, or after submission of an AJAX request like in
        submit or get_results here.
        """
        results = []
        if not self.show_extended_feedback():
            return {
                'results': [],
                'error': 'Extended feedback results cannot be obtained.'
            }
        completed = True
        choices = dict(self.student_results)
        step = self.step
        # Only one child should ever be of concern with this method.
        for child in self.get_children_objects():
            if child.name and child.name in queries:
                results = [child.name, child.get_results(choices[child.name])]
                # Children may have their own definition of 'completed' which can vary from the general case
                # of the whole mentoring block being completed. This is because in standard mode, all children
                # must be correct to complete the block. In assessment mode with extended feedback, completion
                # happens when you're out of attempts, no matter how you did.
                completed = choices[child.name]['status']
                break

        # The 'completed' message should always be shown in this case, since no more attempts are available.
        message = self.get_message(True)

        return {
            'results': results,
            'completed': completed,
            'attempted': self.attempted,
            'message': message,
            'step': step,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
        }

    def get_message(self, completed):
        """Pick the feedback message HTML matching the current completion state."""
        if self.max_attempts_reached:
            return self.get_message_html('max_attempts_reached')
        elif completed:
            return self.get_message_html('completed')
        else:
            return self.get_message_html('incomplete')

    @XBlock.json_handler
    def submit(self, submissions, suffix=''):
        """Handle a student submission; delegates to assessment handling when applicable."""
        log.info(u'Received submissions: {}'.format(submissions))
        self.attempted = True

        if self.is_assessment:
            return self.handleAssessmentSubmit(submissions, suffix)

        submit_results = []
        completed = True
        for child in self.get_children_objects():
            if child.name and child.name in submissions:
                submission = submissions[child.name]
                child_result = child.submit(submission)
                submit_results.append([child.name, child_result])
                child.save()
                # In standard mode, all children must be correct to complete the block.
                completed = completed and (child_result['status'] == CORRECT)

        message = self.get_message(completed)

        # Once it has been completed once, keep completion even if user changes values
        if self.completed:
            completed = True

        # server-side check to not set completion if the max_attempts is reached
        if self.max_attempts_reached:
            completed = False

        if self.has_missing_dependency:
            completed = False
            message = 'You need to complete all previous steps before being able to complete the current one.'
        elif completed and self.next_step == self.url_name:
            self.next_step = self.followed_by

        # Once it was completed, lock score
        if not self.completed:
            # save user score and results
            while self.student_results:
                self.student_results.pop()
            for result in submit_results:
                self.student_results.append(result)

            self.runtime.publish(self, 'grade', {
                'value': self.score.raw,
                'max_value': 1,
            })

        if not self.completed and self.max_attempts > 0:
            self.num_attempts += 1

        self.completed = completed is True

        raw_score = self.score.raw

        self.publish_event_from_dict('xblock.mentoring.submitted', {
            'num_attempts': self.num_attempts,
            'submitted_answer': submissions,
            'grade': raw_score,
        })

        return {
            'results': submit_results,
            'completed': self.completed,
            'attempted': self.attempted,
            'message': message,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts
        }

    def handleAssessmentSubmit(self, submissions, suffix):
        """Handle a submission in assessment mode: one question at a time, in order."""
        completed = False
        current_child = None
        children = [child for child in self.get_children_objects()
                    if not isinstance(child, self.FLOATING_BLOCKS)]

        assessment_message = None

        for child in children:
            if child.name and child.name in submissions:
                submission = submissions[child.name]

                # Assessment mode doesn't allow to modify answers
                # This will get the student back at the step he should be
                current_child = child
                step = children.index(child)
                if self.step > step or self.max_attempts_reached:
                    step = self.step
                    completed = False
                    break

                self.step = step + 1

                child_result = child.submit(submission)
                # Tips are hidden in assessment mode.
                if 'tips' in child_result:
                    del child_result['tips']
                self.student_results.append([child.name, child_result])
                child.save()
                completed = child_result['status']

        event_data = {}

        score = self.score

        if current_child == self.steps[-1]:
            log.info(u'Last assessment step submitted: {}'.format(submissions))
            if not self.max_attempts_reached:
                self.runtime.publish(self, 'grade', {
                    'value': score.raw,
                    'max_value': 1,
                    'score_type': 'proficiency',
                })
                event_data['final_grade'] = score.raw
                assessment_message = self.assessment_message

            self.num_attempts += 1
            self.completed = True

        event_data['exercise_id'] = current_child.name
        event_data['num_attempts'] = self.num_attempts
        event_data['submitted_answer'] = submissions

        self.publish_event_from_dict('xblock.mentoring.assessment.submitted', event_data)

        return {
            'completed': completed,
            'attempted': self.attempted,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
            'step': self.step,
            'score': score.percentage,
            'correct_answer': len(score.correct),
            'incorrect_answer': len(score.incorrect),
            'partially_correct_answer': len(score.partially_correct),
            'extended_feedback': self.show_extended_feedback() or '',
            'correct': self.correct_json(stringify=False),
            'incorrect': self.incorrect_json(stringify=False),
            'partial': self.partial_json(stringify=False),
            'assessment_message': assessment_message,
        }

    @XBlock.json_handler
    def try_again(self, data, suffix=''):
        """Reset assessment progress so the student can start over, if attempts remain."""

        if self.max_attempts_reached:
            return {
                'result': 'error',
                'message': 'max attempts reached'
            }

        # reset
        self.step = 0
        self.completed = False

        while self.student_results:
            self.student_results.pop()

        return {
            'result': 'success'
        }

    @property
    def max_attempts_reached(self):
        """True when attempts are limited and the limit has been used up."""
        return self.max_attempts > 0 and self.num_attempts >= self.max_attempts

    def get_message_fragment(self, message_type):
        """Render the child message block of the given type, or None if absent."""
        for child in self.get_children_objects():
            if isinstance(child, MentoringMessageBlock) and child.type == message_type:
                frag = self.render_child(child, 'mentoring_view', {})
                return self.fragment_text_rewriting(frag)

    def get_message_html(self, message_type):
        """Return the rendered HTML for a message type, or '' when not defined."""
        fragment = self.get_message_fragment(message_type)
        if fragment:
            return fragment.body_html()
        else:
            return ''

    def studio_view(self, context):
        """
        Editing view in Studio
        """
        fragment = Fragment()
        fragment.add_content(loader.render_template('templates/html/mentoring_edit.html', {
            'self': self,
            'xml_content': self.xml_content,
        }))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/mentoring_edit.js'))
        fragment.add_css_url(
            self.runtime.local_resource_url(self, 'public/css/mentoring_edit.css'))

        fragment.initialize_js('MentoringEditBlock')

        return fragment

    @XBlock.json_handler
    def studio_submit(self, submissions, suffix=''):
        """Validate and store the XML content edited in Studio."""
        log.info(u'Received studio submissions: {}'.format(submissions))

        xml_content = submissions['xml_content']
        try:
            content = etree.parse(StringIO(xml_content))
        except etree.XMLSyntaxError as e:
            response = {
                'result': 'error',
                'message': e.message
            }

        else:
            success = True
            root = content.getroot()
            if 'mode' in root.attrib:
                if root.attrib['mode'] not in self.MENTORING_MODES:
                    response = {
                        'result': 'error',
                        'message': "Invalid mentoring mode: should be 'standard' or 'assessment'"
                    }
                    success = False
                elif root.attrib['mode'] == 'assessment' and 'max_attempts' not in root.attrib:
                    # assessment has a default of 2 max_attempts
                    root.attrib['max_attempts'] = '2'

            if success:
                response = {
                    'result': 'success',
                }
                self.xml_content = etree.tostring(content, pretty_print=True)

        log.debug(u'Response from Studio: {}'.format(response))
        return response

    @property
    def url_name_with_default(self):
        """
        Ensure the `url_name` is set to a unique, non-empty value.
        This should ideally be handled by Studio, but we need to declare the attribute
        to be able to use it from the workbench, and when this happen Studio doesn't set
        a unique default value - this property gives either the set value, or if none is set
        a randomized default value
        """
        if self.url_name == 'mentoring-default':
            return 'mentoring-{}'.format(uuid.uuid4())
        else:
            return self.url_name

    @staticmethod
    def workbench_scenarios():
        """
        Scenarios displayed by the workbench. Load them from external (private) repository
        """
        return loader.load_scenarios_from_path('templates/xml')
Example #13
0
class XBlock(Plugin):
    """Base class for XBlocks.

    Derive from this class to create a new kind of XBlock.  There are no
    required methods, but you will probably need at least one view.

    Don't provide the ``__init__`` method when deriving from this class.

    """

    # NOTE: Python 2-style metaclass declaration; this module targets
    # Python 2 (see `basestring`/`unicode` usage below).
    __metaclass__ = XBlockMetaclass

    entry_point = 'xblock.v1'

    parent = Reference(help='The id of the parent of this XBlock',
                       default=None,
                       scope=Scope.parent)
    name = String(help="Short name for the block", scope=Scope.settings)
    tags = List(help="Tags for this block", scope=Scope.settings)

    # Class-level tag set, shared across the hierarchy and populated by the
    # `tag` decorator below; queried by `load_tagged_classes`.
    _class_tags = set()

    @classmethod
    def json_handler(cls, func):
        """Wrap a handler to consume and produce JSON.

        Rather than a Request object, the method will now be passed the
        JSON-decoded body of the request.  Any data returned by the function
        will be JSON-encoded and returned as the response.

        The wrapped function can raise JsonHandlerError to return an error
        response with a non-200 status code.
        """
        @XBlock.handler
        @functools.wraps(func)
        def wrapper(self, request, suffix=''):
            """The wrapper function `json_handler` returns."""
            # Only POST is accepted; other verbs get a 405 with an Allow header.
            if request.method != "POST":
                return JsonHandlerError(
                    405, "Method must be POST").get_response(allow=["POST"])
            try:
                request_json = json.loads(request.body)
            except ValueError:
                return JsonHandlerError(400, "Invalid JSON").get_response()
            try:
                response_json = json.dumps(func(self, request_json, suffix))
            except JsonHandlerError as e:
                return e.get_response()
            return Response(response_json, content_type='application/json')

        return wrapper

    @classmethod
    def handler(cls, func):
        """A decorator to indicate a function is usable as a handler."""
        func._is_xblock_handler = True  # pylint: disable=protected-access
        return func

    @staticmethod
    def tag(tags):
        """Returns a function that adds the words in `tags` as class tags to this class."""
        def dec(cls):
            """Add the words in `tags` as class tags to this class."""
            # Add in this class's tags
            # Tags may be separated by commas and/or whitespace.
            cls._class_tags.update(tags.replace(",", " ").split())  # pylint: disable=protected-access
            return cls

        return dec

    @classmethod
    def load_tagged_classes(cls, tag):
        """Produce a sequence of all XBlock classes tagged with `tag`."""
        # Allow this method to access the `_class_tags`
        # pylint: disable=W0212
        for name, class_ in cls.load_classes():
            if tag in class_._class_tags:
                yield name, class_

    @classmethod
    def open_local_resource(cls, uri):
        """Open a local resource.

        The container calls this method when it receives a request for a
        resource on a URL which was generated by Runtime.local_resource_url().
        It will pass the URI from the original call to local_resource_url()
        back to this method. The XBlock must parse this URI and return an open
        file-like object for the resource.

        For security reasons, the default implementation will return only a
        very restricted set of file types, which must be located in a folder
        called "public". XBlock authors who want to override this behavior will
        need to take care to ensure that the method only serves legitimate
        public resources. At the least, the URI should be matched against a
        whitelist regex to ensure that you do not serve an unauthorized
        resource.

        """
        # Verify the URI is in whitelisted form before opening for serving.
        # URI must begin with public/, all file/folder names must use only
        # characters from [a-zA-Z0-9\-_], and the file type must be one of
        # jpg, jpeg, png, gif, js, css, json or html
        assert re.match(
            r'^public/([a-zA-Z0-9\-_]+/)*[a-zA-Z0-9\-_]+\.(jpg|jpeg|png|gif|js|css|json|html)$',
            uri)
        return pkg_resources.resource_stream(cls.__module__, uri)

    @staticmethod
    def needs(service_name):
        """A class decorator to indicate that an XBlock class needs a particular service."""
        def _decorator(cls):  # pylint: disable=missing-docstring
            cls._services_requested[service_name] = "need"  # pylint: disable=protected-access
            return cls

        return _decorator

    @staticmethod
    def wants(service_name):
        """A class decorator to indicate that an XBlock class wants a particular service."""
        def _decorator(cls):  # pylint: disable=missing-docstring
            cls._services_requested[service_name] = "want"  # pylint: disable=protected-access
            return cls

        return _decorator

    @classmethod
    def service_declaration(cls, service_name):
        """
        Find and return a service declaration.

        XBlocks declare their service requirements with @XBlock.needs and
        @XBlock.wants decorators.  These store information on the class.
        This function finds those declarations for a block.

        Arguments:
            service_name (string): the name of the service requested.

        Returns:
            One of "need", "want", or None.

        """
        # The class declares what services it desires. To deal with subclasses,
        # especially mixins, properly, we have to walk up the inheritance
        # hierarchy, and combine all the declared services into one dictionary.
        # We do this once per class, then store the result on the class.
        if "_combined_services" not in cls.__dict__:
            # Walk the MRO chain, collecting all the services together.
            # Reversed order lets more-derived classes override their bases.
            combined = {}
            for parent in reversed(cls.__mro__):
                combined.update(getattr(parent, "_services_requested", {}))
            cls._combined_services = combined
        declaration = cls._combined_services.get(service_name)
        return declaration

    def __init__(self, runtime, field_data, scope_ids):
        """
        Construct a new XBlock.

        This class should only be instantiated by runtimes.

        Arguments:

            runtime (:class:`.Runtime`): Use it to access the environment.
                It is available in XBlock code as ``self.runtime``.

            field_data (:class:`.FieldData`): Interface used by the XBlock
                fields to access their data from wherever it is persisted.

            scope_ids (:class:`.ScopeIds`): Identifiers needed to resolve
                scopes.

        """
        self.runtime = runtime
        self._field_data = field_data
        self._field_data_cache = {}
        self._dirty_fields = {}
        self.scope_ids = scope_ids

        # A cache of the parent block, retrieved from .parent
        self._parent_block = None
        self._parent_block_id = None

    def __repr__(self):
        # `XBlock` obtains the `fields` attribute from the `ModelMetaclass`.
        # Since this is not understood by static analysis, silence this error.
        # pylint: disable=E1101
        attrs = []
        for field in self.fields.values():
            try:
                value = getattr(self, field.name)
            except Exception:  # pylint: disable=W0703
                # Ensure we return a string, even if unanticipated exceptions.
                attrs.append(" %s=???" % (field.name, ))
            else:
                # Truncate long string values so the repr stays readable.
                if isinstance(value, basestring):
                    value = value.strip()
                    if len(value) > 40:
                        value = value[:37] + "..."
                attrs.append(" %s=%r" % (field.name, value))
        return "<%s @%04X%s>" % (self.__class__.__name__, id(self) % 0xFFFF,
                                 ','.join(attrs))

    def get_parent(self):
        """Return the parent block of this block, or None if there isn't one."""
        # Refresh the cached parent block only when the stored parent id changed.
        if self._parent_block_id != self.parent:
            if self.parent is not None:
                self._parent_block = self.runtime.get_block(self.parent)
            else:
                self._parent_block = None
            self._parent_block_id = self.parent
        return self._parent_block

    def render(self, view, context=None):
        """Render `view` with this block's runtime and the supplied `context`"""
        return self.runtime.render(self, view, context)

    def handle(self, handler_name, request, suffix=''):
        """Handle `request` with this block's runtime."""
        return self.runtime.handle(self, handler_name, request, suffix)

    def save(self):
        """Save all dirty fields attached to this XBlock."""
        if not self._dirty_fields:
            # nop if _dirty_fields attribute is empty
            return
        try:
            fields_to_save = self._get_fields_to_save()
            # Throws KeyValueMultiSaveError if things go wrong
            self._field_data.set_many(self, fields_to_save)

        except KeyValueMultiSaveError as save_error:
            # Partial failure: figure out which fields did save, drop them
            # from the dirty set, and report the rest to the caller.
            saved_fields = [
                field for field in self._dirty_fields
                if field.name in save_error.saved_field_names
            ]
            for field in saved_fields:
                # should only find one corresponding field
                del self._dirty_fields[field]
            raise XBlockSaveError(saved_fields, self._dirty_fields.keys())

        # Remove all dirty fields, since the save was successful
        self._clear_dirty_fields()

    def _get_fields_to_save(self):
        """
        Create dictionary mapping between dirty fields and data cache values.
        A `field` is an instance of `Field`.
        """
        fields_to_save = {}
        for field in self._dirty_fields.keys():
            # If the field value isn't the same as the baseline we recorded
            # when it was read, then save it
            if field._is_dirty(self):  # pylint: disable=protected-access
                fields_to_save[field.name] = field.to_json(
                    self._field_data_cache[field.name])
        return fields_to_save

    def _clear_dirty_fields(self):
        """
        Remove all dirty fields from an XBlock.
        """
        self._dirty_fields.clear()

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        """
        Use `node` to construct a new block.

        Arguments:
            node (etree.Element): The xml node to parse into an xblock.

            runtime (:class:`.Runtime`): The runtime to use while parsing.

            keys (:class:`.ScopeIds`): The keys identifying where this block
                will store its data.

            id_generator (:class:`.IdGenerator`): An object that will allow the
                runtime to generate correct definition and usage ids for
                children of this block.

        """
        block = runtime.construct_xblock_from_class(cls, keys)

        # The base implementation: child nodes become child blocks.
        for child in node:
            block.runtime.add_node_as_child(block, child, id_generator)

        # Attributes become fields.
        for name, value in node.items():
            if name in block.fields:
                setattr(block, name, value)

        # Text content becomes "content", if such a field exists.
        if "content" in block.fields and block.fields[
                "content"].scope == Scope.content:
            text = node.text
            if text:
                text = text.strip()
                if text:
                    block.content = text

        return block

    def add_xml_to_node(self, node):
        """
        For exporting, set data on `node` from ourselves.
        """
        # pylint: disable=E1101
        # Set node.tag based on our class name.
        node.tag = self.xml_element_name()

        # Set node attributes based on our fields.
        # 'children' and 'content' are serialized separately below;
        # 'parent' is not exported as an attribute at all.
        for field_name, field in self.fields.items():
            if field_name in ('children', 'parent', 'content'):
                continue
            if field.is_set_on(self):
                node.set(field_name, unicode(field.read_from(self)))

        # Add children for each of our children.
        if self.has_children:
            for child_id in self.children:
                child = self.runtime.get_block(child_id)
                self.runtime.add_block_as_child_node(child, node)

        # A content field becomes text content.
        text = self.xml_text_content()
        if text is not None:
            node.text = text

    def xml_element_name(self):
        """What XML element name should be used for this block?"""
        return self.scope_ids.block_type

    def xml_text_content(self):
        """What is the text content for this block's XML node?"""
        # pylint: disable=E1101
        if 'content' in self.fields and self.content:
            return self.content
        else:
            return None
Example #14
0
class HastexoXBlock(XBlock,
                    XBlockWithSettingsMixin,
                    StudioEditableXBlockMixin):
    """
    Provides lab environments and an SSH connection to them.

    """
    # Settings with defaults.
    display_name = String(
        default="Lab",
        scope=Scope.settings,
        help="Title to display")
    weight = Float(
        default=1,
        scope=Scope.settings,
        help="Defines the maximum total grade of the block.")

    # Mandatory: must be set per instance.
    stack_template_path = String(
        scope=Scope.settings,
        help="The relative path to the uploaded orchestration template. "
             "For example, \"hot_lab.yaml\".")
    stack_user_name = String(
        scope=Scope.settings,
        help="The name of the training user in the stack.")
    stack_protocol = String(
        values=["ssh", "rdp", "vnc"],
        default="ssh",
        scope=Scope.settings,
        help="What protocol to use for the connection. "
             "Currently, \"ssh\", \"rdp\", or \"vnc\".")
    stack_ports = List(
        default=[],
        scope=Scope.settings,
        help="What ports are available in the stack.")
    stack_port_names = List(
        default=[],
        scope=Scope.settings,
        help="Names of ports defined above.")
    provider = String(
        default="default",
        scope=Scope.settings,
        help="Where to launch the stack.")

    # Set exclusively via XML
    tests = List(
        default=[],
        scope=Scope.content,
        help="The list of tests to run.")

    # User state, per instance.
    stack_run = String(
        default="",
        scope=Scope.user_state,
        help="The name of the run")
    stack_name = String(
        default="",
        scope=Scope.user_state,
        help="The name of the user's stack")
    check_id = String(
        default="",
        scope=Scope.user_state,
        help="The check task id")
    check_timestamp = Integer(
        default="",
        scope=Scope.user_state,
        help="When the check task was launched")
    check_status = Dict(
        default=None,
        scope=Scope.user_state,
        help="The check status")

    editable_fields = (
        'display_name',
        'weight',
        'stack_template_path',
        'stack_user_name',
        'stack_protocol',
        'stack_ports',
        'stack_port_names',
        'provider')

    has_author_view = True
    has_score = True
    has_children = True
    icon_class = 'problem'
    block_settings_key = SETTINGS_KEY

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        block = runtime.construct_xblock_from_class(cls, keys)

        # Find <test> children
        for child in node:
            if child.tag == "test":
                text = child.text

                # Fix up whitespace.
                if text[0] == "\n":
                    text = text[1:]
                text.rstrip()
                text = textwrap.dedent(text)

                block.tests.append(text)
            else:
                block.runtime.add_node_as_child(block, child, id_generator)

        # Attributes become fields.
        for name, value in node.items():
            if name in block.fields:
                value = (block.fields[name]).from_string(value)
                setattr(block, name, value)

        return block

    def author_view(self, context=None):
        """ Studio View """
        msg = u"This XBlock only renders content when viewed via the LMS."
        return Fragment(u'<em>%s</em></p>' % msg)

    def is_correct(self):
        if not (self.check_status and isinstance(self.check_status, dict)):
            return False
        else:
            total = self.check_status.get('total')
            if not total:
                return False
            else:
                score = self.check_status.get('pass')
                return score == total

    def get_block_ids(self):
        try:
            course_id = getattr(self.xmodule_runtime, 'course_id', 'all')
            student_id = self.xmodule_runtime.anonymous_student_id
        except AttributeError:
            course_id = 'all'
            student_id = self.scope_ids.user_id

        return (course_id, student_id)

    def get_stack_template(self):
        """
        Load the stack template directly from the course's content store.

        Note: accessing the contentstore directly is not supported by the
        XBlock API, so this depends on keeping pace with changes to
        edx-platform itself.  Because of it, this should be replaced with an
        HTTP GET to the LMS, in the future.

        """
        course_id, _ = self.get_block_ids()
        stack_template = None
        try:
            from xmodule.contentstore.content import StaticContent
            from xmodule.contentstore.django import contentstore
            from xmodule.exceptions import NotFoundError

            loc = StaticContent.compute_location(course_id,
                                                 self.stack_template_path)
            asset = contentstore().find(loc)
            stack_template = asset.data
        except (ImportError, NotFoundError):
            pass

        return stack_template

    def student_view(self, context=None):
        """
        The primary view of the HastexoXBlock, shown to students when viewing
        courses.
        """
        def error_frag(msg):
            """ Build a fragment to display runtime errors. """
            context = {'error_msg': msg}
            html = loader.render_template('static/html/error.html', context)
            frag = Fragment(html)
            frag.add_css_url(
                self.runtime.local_resource_url(self,
                                                'public/css/main.css')
            )
            return frag

        # Load configuration
        configuration = self.get_configuration()

        # Get the course id and anonymous user id, and derive the stack name
        # from them
        course_id, anonymous_student_id = self.get_block_ids()
        self.stack_run = "%s_%s" % (course_id.course, course_id.run)
        self.stack_name = "%s_%s" % (self.stack_run, anonymous_student_id)

        # Render the HTML template
        html = loader.render_template('static/html/main.html')
        frag = Fragment(html)

        # Add the public CSS and JS
        frag.add_css_url(
            self.runtime.local_resource_url(self, 'public/css/main.css')
        )
        frag.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/plugins.js')
        )
        frag.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/main.js')
        )

        # Set the port
        port = None
        if len(self.stack_ports) > 0:
            port = self.stack_get("port")
            if not port or port not in self.stack_ports:
                port = self.stack_ports[0]

        # Update stack info
        self.stack_update({
            "provider": self.provider,
            "protocol": self.stack_protocol,
            "port": port
        })

        # Call the JS initialization function
        frag.initialize_js('HastexoXBlock', {
            "terminal_url": configuration.get("terminal_url"),
            "timeouts": configuration.get("js_timeouts"),
            "has_tests": len(self.tests) > 0,
            "protocol": self.stack_protocol,
            "ports": self.stack_ports,
            "port_names": self.stack_port_names,
            "port": port,
            "provider": self.provider
        })

        return frag

    def get_configuration(self):
        """
        Get the configuration data for the student_view.

        """
        settings = self.get_xblock_settings(default=DEFAULT_SETTINGS)
        return get_xblock_configuration(settings, self.provider)

    @transaction.atomic()
    def stack_update(self, data):
        course_id, student_id = self.get_block_ids()
        stack, _ = Stack.objects.select_for_update().get_or_create(
            student_id=student_id,
            course_id=course_id,
            name=self.stack_name
        )
        for (field, value) in data.items():
            if hasattr(stack, field):
                setattr(stack, field, value)
        stack.save()

    @transaction.atomic()
    def stack_get(self, prop=None):
        course_id, student_id = self.get_block_ids()
        stack, _ = Stack.objects.select_for_update().get_or_create(
            student_id=student_id,
            course_id=course_id,
            name=self.stack_name
        )

        if prop:
            return getattr(stack, prop)
        else:
            return stack

    def reset_suspend_timestamp(self):
        self.stack_update({"suspend_timestamp": timezone.now()})

    def launch_stack_task(self, args):
        configuration = args[0]
        task = LaunchStackTask()
        soft_time_limit = configuration.get('launch_timeout')
        time_limit = soft_time_limit + 30
        result = task.apply_async(
            args=args,
            expires=soft_time_limit,
            soft_time_limit=soft_time_limit,
            time_limit=time_limit
        )
        logger.info(
            'Launch task id for '
            'stack [%s] is: [%s]' % (self.stack_name, result.id)
        )

        return result

    def launch_stack_task_result(self, task_id):
        return LaunchStackTask().AsyncResult(task_id)

    @XBlock.json_handler
    def get_user_stack_status(self, request_data, suffix=''):
        configuration = self.get_configuration()

        def _launch_stack(reset=False):
            args = (
                configuration,
                self.stack_run,
                self.stack_name,
                self.get_stack_template(),
                self.stack_user_name,
                reset
            )

            logger.info('Firing async launch '
                        'task for [%s]' % (self.stack_name))
            result = self.launch_stack_task(args)

            # Save task ID and timestamp
            self.stack_update({
                "launch_task_id": result.id,
                "launch_timestamp": timezone.now()
            })

            return result

        def _process_result(result):
            if result.ready():
                if (result.successful() and
                        isinstance(result.result, dict) and not
                        result.result.get('error')):
                    data = result.result
                else:
                    data = {
                        "status": LAUNCH_ERROR_STATE,
                        "error_msg": "Unexpected result: %s" % repr(result.result)  # noqa: E501
                    }
            else:
                data = {"status": LAUNCH_STATE}

            # Save status
            self.stack_update(data)

            return data

        def _process_stack_data(stack):
            data = {
                "status": stack.status,
                "error_msg": stack.error_msg,
                "ip": stack.ip,
                "user": stack.user,
                "key": stack.key,
                "password": stack.password
            }

            return data

        def _process_error(error_msg):
            data = {
                "status": LAUNCH_ERROR_STATE,
                "error_msg": error_msg
            }

            # Save status
            self.stack_update(data)

            return data

        # Fetch the stack
        stack = self.stack_get()

        # Calculate the time since the suspend timer was last reset.
        suspend_timeout = configuration.get("suspend_timeout")
        suspend_timestamp = stack.suspend_timestamp
        time_since_suspend = 0
        if suspend_timeout and suspend_timestamp:
            time_since_suspend = (timezone.now() - suspend_timestamp).seconds

        # Request type
        initialize = request_data.get("initialize", False)
        reset = request_data.get("reset", False)

        # Get the last stack status
        prev_status = stack.status

        # No last stack status: this is the first time
        # the user launches this stack.
        if not prev_status:
            logger.info('Launching stack [%s] '
                        'for the first time.' % (self.stack_name))
            result = _launch_stack(reset)
            stack_data = _process_result(result)

        # There was a previous attempt at launching the stack
        elif prev_status == LAUNCH_STATE:
            # Update task result
            launch_task_id = self.stack_get("launch_task_id")
            result = self.launch_stack_task_result(launch_task_id)
            stack_data = _process_result(result)
            current_status = stack_data.get("status")

            # Stack is still LAUNCH_STATE since last check.
            if current_status == LAUNCH_STATE:
                # Calculate time since launch
                launch_timestamp = self.stack_get("launch_timestamp")
                time_since_launch = (timezone.now() - launch_timestamp).seconds
                launch_timeout = configuration.get("launch_timeout")

                # Check if the pending task hasn't timed out.
                if time_since_launch <= launch_timeout:
                    # The pending task still has some time to finish.
                    # Please wait.
                    logger.info('Launch pending for [%s]' % (self.stack_name))

                elif initialize or reset:
                    # Timeout reached, but the user just entered the page or
                    # requested a reset.  Try launching the stack again.
                    if initialize:
                        logger.info('Launch timeout detected on initialize. '
                                    'Launching stack [%s]' % (self.stack_name))
                    else:
                        logger.info('Launch timeout detected on reset. '
                                    'Resetting stack [%s]' % (self.stack_name))
                    result = _launch_stack(reset)
                    stack_data = _process_result(result)

                else:
                    # Timeout reached.  Consider the task a failure and let the
                    # user retry manually.
                    logger.error('Launch timeout reached for [%s] '
                                 'after %s seconds' % (self.stack_name,
                                                       time_since_launch))
                    stack_data = _process_error("Timeout when launching "
                                                "or resuming stack.")

            # Stack changed from LAUNCH_STATE to COMPLETE.
            elif current_status in UP_STATES:
                if reset or (suspend_timeout and time_since_suspend >= suspend_timeout):  # noqa: E501
                    if reset:
                        logger.info('Resetting successfully launched '
                                    'stack [%s].' % (self.stack_name))
                    else:
                        logger.info('Stack [%s] may have suspended. '
                                    'Relaunching.' % (self.stack_name))
                    result = _launch_stack(reset)
                    stack_data = _process_result(result)

                # The stack couldn't have been suspended, yet.
                else:
                    logger.info('Successful launch detected for [%s], '
                                'with status [%s]' % (self.stack_name,
                                                      current_status))

            # Detected a failed launch attempt, but the user has requested a
            # retry, just entered the page, or requested a reset, so start from
            # scratch.
            elif initialize or reset:
                if reset:
                    logger.info('Resetting failed '
                                'stack [%s].' % (self.stack_name))
                else:
                    logger.info('Retrying previously failed '
                                'stack [%s].' % (self.stack_name))
                result = _launch_stack(reset)
                stack_data = _process_result(result)

            # Detected a failed launch attempt.
            # Report the error and let the user retry manually.
            else:
                logger.error('Failed launch detected for [%s], '
                             'with status [%s]' % (self.stack_name,
                                                   current_status))

        # The stack was previously launched successfully
        elif prev_status in UP_STATES:
            if reset or (suspend_timeout and time_since_suspend >= suspend_timeout):  # noqa: E501
                if reset:
                    logger.info('Resetting successfully launched '
                                'stack [%s].' % (self.stack_name))
                else:
                    logger.info('Stack [%s] may have suspended. '
                                'Relaunching.' % (self.stack_name))
                result = _launch_stack(reset)
                stack_data = _process_result(result)

            else:
                logger.info('Successful launch detected for [%s], '
                            'with status [%s]' % (self.stack_name,
                                                  prev_status))
                stack_data = _process_stack_data(stack)

        # Detected a failed launch attempt, but the user just entered the page,
        # or requested a retry or reset, so start from scratch.
        elif initialize or reset:
            if reset:
                logger.info('Resetting failed stack [%s].' % (self.stack_name))
            else:
                logger.info('Retrying previously failed '
                            'stack [%s].' % (self.stack_name))
            result = _launch_stack(reset)
            stack_data = _process_result(result)

        # Detected a failed launch attempt.  Report the error and let the user
        # retry manually.
        else:
            logger.error('Failed launch detected for [%s], '
                         'with status [%s]' % (self.stack_name,
                                               prev_status))
            stack_data = _process_stack_data(stack)

        # Reset the dead man's switch
        self.reset_suspend_timestamp()

        return stack_data

    @XBlock.json_handler
    def keepalive(self, data, suffix=''):
        # Reset the dead man's switch
        self.reset_suspend_timestamp()

    def check_progress_task(self, args):
        configuration = args[0]
        task = CheckStudentProgressTask()
        soft_time_limit = configuration.get('check_timeout')
        time_limit = soft_time_limit + 30
        result = task.apply_async(
            args=args,
            expires=soft_time_limit,
            soft_time_limit=soft_time_limit,
            time_limit=time_limit
        )

        return result

    def check_progress_task_result(self, check_id):
        return CheckStudentProgressTask().AsyncResult(check_id)

    @XBlock.json_handler
    def get_check_status(self, data, suffix=''):
        """
        Checks the current student score.
        """
        configuration = self.get_configuration()

        def _launch_check():
            stack = self.stack_get()
            logger.info('Executing tests for stack [%s], IP [%s], user [%s]:' %
                        (self.stack_name, stack.ip,
                         self.stack_user_name))
            for test in self.tests:
                logger.info('Test: %s' % test)

            args = (
                configuration,
                self.tests,
                stack.ip,
                self.stack_user_name,
                stack.key
            )
            result = self.check_progress_task(args)

            # Save task ID and timestamp
            self.check_id = result.id
            self.check_timestamp = int(time.time())

            return result

        def _process_result(result):
            if result.ready():
                # Clear the task ID so we know there is no task running.
                self.check_id = ""

                if (result.successful() and
                        isinstance(result.result, dict) and not
                        result.result.get('error')):
                    status = result.result

                    # Publish the grade
                    self.runtime.publish(self, 'grade', {
                        'value': status['pass'],
                        'max_value': status['total']
                    })
                else:
                    status = {
                        'status': 'ERROR',
                        'error_msg': 'Unexpected result: %s' % repr(result.result)  # noqa: E501
                    }
            else:
                status = {'status': 'CHECK_PROGRESS_PENDING'}

            # Store the result
            self.check_status = status

            return status

        # If a check task is running, return its status.
        if self.check_id:
            logger.info('Check progress task is running: %s' % self.check_id)
            result = self.check_progress_task_result(self.check_id)
            status = _process_result(result)

            if status['status'] == 'CHECK_PROGRESS_PENDING':
                time_since_check = int(time.time()) - self.check_timestamp
                check_timeout = configuration.get("check_timeout")

                # Check if the pending task hasn't timed out.
                if time_since_check >= check_timeout:
                    # Timeout reached.  Consider the task a failure and let the
                    # user retry manually.
                    logger.error('Check timeout reached for [%s] '
                                 'after %s seconds' % (self.stack_name,
                                                       time_since_check))
                    self.check_id = ""
                    status = {'status': 'ERROR',
                              'error_msg': "Timeout when checking progress."}

        # Otherwise, launch the check task.
        else:
            result = _launch_check()
            status = _process_result(result)

        return status

    @XBlock.json_handler
    def set_port(self, data, suffix=''):
        # Set the preferred stack port
        self.stack_update({"port": int(data.get("port"))})

    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("HastexoXBlock",
             """<vertical_demo>
                <hastexo/>
                </vertical_demo>
             """),
        ]
Пример #15
0
class VideoFields(object):
    """Fields for `VideoModule` and `VideoDescriptor`."""
    # NOTE: the display_name/help values below are runtime strings shown to
    # users (in Chinese); English translations are given in the comments.
    # Module title ("Display Name" / "display module name").
    display_name = String(display_name="显示名称",
                          help="显示模块名称",
                          default="Video",
                          scope=Scope.settings)
    # Where the student last stopped watching; restored on next view.
    saved_video_position = RelativeTime(help="Current position in the video",
                                        scope=Scope.user_state,
                                        default=datetime.timedelta(seconds=0))
    # TODO: This should be moved to Scope.content, but this will
    # require data migration to support the old video module.
    # YouTube ID for normal (1.0x) playback speed.
    youtube_id_1_0 = String(help="这是正常的视频播放速度",
                            display_name="Video ID",
                            scope=Scope.settings,
                            default="OEoXaMPEzfM")
    # Optional YouTube ID pre-encoded at 0.75x speed, for older browsers.
    youtube_id_0_75 = String(help="可选项,对于旧的浏览器:播放速度为正常的0.75。",
                             display_name=".75x speed",
                             scope=Scope.settings,
                             default="")
    # Optional YouTube ID pre-encoded at 1.25x speed, for older browsers.
    youtube_id_1_25 = String(help="可选项,对于旧的浏览器:播放速度为正常的1.25倍。",
                             display_name="1.25x speed",
                             scope=Scope.settings,
                             default="")
    # Optional YouTube ID pre-encoded at 1.5x speed, for older browsers.
    youtube_id_1_5 = String(help="可选项,对于旧的浏览器:播放速度为正常的1.5倍.",
                            display_name="1.5x speed",
                            scope=Scope.settings,
                            default="")
    # Clip start time (HH:MM:SS, max 23:59:59).
    start_time = RelativeTime(  # datetime.timedelta object
        help="视频的开始时间 (HH:MM:SS).最大值为 23:59:59",
        display_name="开始时间",
        scope=Scope.settings,
        default=datetime.timedelta(seconds=0))
    # Clip end time (HH:MM:SS, max 23:59:59).
    end_time = RelativeTime(  # datetime.timedelta object
        help="视频的结束时间 (HH:MM:SS).最大值为 23:59:59",
        display_name="结束时间",
        scope=Scope.settings,
        default=datetime.timedelta(seconds=0))
    # Front-end code of the video player checks logical validity of the
    # (start_time, end_time) pair.

    # `source` is deprecated field and should not be used in future.
    # `download_video` is used instead.
    # External download URL for the video ("Video Download").
    source = String(help="视频外部链接下载地址",
                    display_name="视频下载",
                    scope=Scope.settings,
                    default="")
    # Whether to show a download link below the video ("Allow video download").
    download_video = Boolean(help="显示视频下方的链接,让学生下载的视频。注意:您必须在下面添加至少一个视频源.",
                             display_name="允许下载视频",
                             scope=Scope.settings,
                             default=False)
    # HTML5 source file names; the first supported type is displayed.
    html5_sources = List(
        help="文件名要与HTML5视频使用的清单。第一个支持的文件类型就会显示出来。",
        display_name="视频资源",
        scope=Scope.settings,
    )
    # External URL for downloading the transcript track.
    track = String(help="外部URL下载时记录跟踪,如下显示的视频连接。",
                   display_name="下载记录",
                   scope=Scope.settings,
                   default='')
    # Whether to show a transcript download link below the video.
    download_track = Boolean(help="显示视频下方的链接,让学生下载视频。注意:您必须添加一个HTML5的链接记录。",
                             display_name="允许下载记录",
                             scope=Scope.settings,
                             default=False)
    # Name of the primary transcript track.
    sub = String(help="跟踪记录的名称.",
                 display_name="记录(初级)",
                 scope=Scope.settings,
                 default="")
    # Whether captions are shown by default.
    show_captions = Boolean(help="这种控制标题是否被默认显示。",
                            display_name="显示记录",
                            scope=Scope.settings,
                            default=True)
    # Data format: {'de': 'german_translation', 'uk': 'ukrainian_translation'}
    transcripts = Dict(help="用其他语言添加翻译译文",
                       display_name="翻译记录",
                       scope=Scope.settings,
                       default={})
    transcript_language = String(
        help="Preferred language for transcript",
        display_name="Preferred language for transcript",
        scope=Scope.preferences,
        default="en")
    # Last speed explicitly chosen by the user for this specific video.
    speed = Float(
        help="The last speed that was explicitly set by user for the video.",
        scope=Scope.user_state,
    )
    # Fallback speed used when no per-video speed has been set.
    global_speed = Float(
        help=
        "Default speed in cases when speed wasn't explicitly for specific video",
        scope=Scope.preferences,
        default=1.0)
Пример #16
0
class AnimationXBlock(XBlock):
    """
    An XBlock that steps students through a sequence of captioned images
    with a slider, grading on the furthest frame reached.
    """

    # Each element is a dict with "src" (image URL) and "desc" (caption).
    animation = List(
        default=[],
        scope=Scope.settings,
        help="Animation",
    )

    height = Integer(
        scope=Scope.settings,
        help="Height"
        )

    textheight = Integer(
        scope=Scope.settings,
        help="Text Height"
        )

    width = Integer(
        scope=Scope.settings,
        help="Width"
        )

    position = Integer(
        scope=Scope.user_state,
        help="Current position",
        default=0
    )

    max_position = Integer(
        scope=Scope.user_state,
        help="Maximum position (for progress)",
        default=0
    )

    @XBlock.json_handler
    def update_position(self, data, suffix):
        """Persist the student's slider position and publish a grade
        proportional to the furthest frame reached."""
        if 'position' in data:
            self.position = data['position']
        if 'max_position' in data:
            self.max_position = data['max_position']
            # Bug fix: guard against an empty animation, which previously
            # raised ZeroDivisionError when computing the grade.
            if self.animation:
                grade = self.max_position/float(len(self.animation))
                self.runtime.publish(self, 'grade',
                                     {'value': grade, 'max_value': 1})
        return {"status":"success"}

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def student_view(self, context=None):
        """
        The primary view of the AnimationXBlock, shown to students
        when viewing courses.
        """
        html = self.resource_string("static/html/animation.html")
        frag = Fragment(html.format(height = self.height, 
                                    textheight = self.textheight, 
                                    width=self.width, 
                                    inner_width=self.width-20, 
                                    animation = json.dumps(self.animation),
                                    position = self.position, 
                                    max_position = self.max_position))
        frag.add_css_url("//ajax.googleapis.com/ajax/libs/jqueryui/1.10.4/themes/smoothness/jquery-ui.css")
        frag.add_css(self.resource_string("static/css/jquery.ui.labeledslider.css"))
        frag.add_javascript_url("//ajax.googleapis.com/ajax/libs/jqueryui/1.10.4/jquery-ui.min.js")
        frag.add_javascript(self.resource_string("static/js/src/jquery.ui.labeledslider.js"))
        frag.add_css(self.resource_string("static/css/animation.css"))
        frag.add_javascript(self.resource_string("static/js/src/animation.js"))
        frag.initialize_js('AnimationXBlock')
        return frag

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        """
        Parse the XML for an animation block.

        The node's text is a sequence of lines: a line starting with "http"
        begins a new frame (the image URL); other lines are appended to the
        current frame's description.

        """
        block = runtime.construct_xblock_from_class(cls, keys)
        animation = []

        # Dummy element: collects (and discards) any text appearing before
        # the first URL line.
        element = {"desc":""}
        # Bug fix: node.text may be None for an empty element.
        for line in (node.text or "").split('\n'):
            line = line.strip()
            if line.startswith("http"):
                element = {"src": line, "desc":""}
                animation.append(element)
            else:
                element["desc"] = element["desc"]  + " " + line

        block.animation = animation
        # Bug fix: XML attribute values are strings; convert them so the
        # Integer fields hold ints (student_view computes self.width-20,
        # which fails on a string).
        block.height = int(node.attrib["height"])
        block.textheight = int(node.attrib["textheight"])
        block.width = int(node.attrib["width"])

        return block

    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("AnimationXBlock",
             """<vertical_demo>
                <animation width="460" height="384" textheight="100">
http://upload.wikimedia.org/wikipedia/commons/e/e8/Pin_tumbler_no_key.svg
Without a key in the lock, the driver pins (blue) are pushed downwards, preventing the plug (yellow) from rotating.
http://upload.wikimedia.org/wikipedia/commons/5/54/Pin_tumbler_bad_key.svg
When an incorrect key is inserted into the lock, the key pins (red) and driver pins (blue) do not align with the shear line; therefore, it does not allow the plug (yellow) to rotate.
http://upload.wikimedia.org/wikipedia/commons/6/6e/Pin_tumbler_with_key.svg
When the correct key is inserted, the gaps between the key pins (red) and driver pins (blue) align with the edge of the plug (yellow).
http://upload.wikimedia.org/wikipedia/commons/e/e1/Pin_tumbler_unlocked.svg
With the gaps between the pins aligned with the shear line, the plug (yellow) can rotate freely.
                </animation>
                </vertical_demo>
             """),
        ]
    ## Everything below is stolen from https://github.com/edx/edx-ora2/blob/master/apps/openassessment/xblock/lms_mixin.py
    ## It's needed to keep the LMS+Studio happy. 
    ## It should be included as a mixin. 
    ## 
    ## The only LMS functionality we need and use is grading. Cale
    ## believes most of this is unnecessary, but I did not want to do
    ## a binary search for what is and is not necessary, since this is
    ## effectively a TODO. 

    display_name = String(
        default="Completion", scope=Scope.settings,
        help="Display name"
    )

    start = DateTime(
        default=None, scope=Scope.settings,
        help="ISO-8601 formatted string representing the start date of this assignment. We ignore this."
    )

    due = DateTime(
        default=None, scope=Scope.settings,
        help="ISO-8601 formatted string representing the due date of this assignment. We ignore this."
    )

    weight = Float(
        display_name="Problem Weight",
        help=("Defines the number of points each problem is worth. "
              "If the value is not set, the problem is worth the sum of the "
              "option point values."),
        values={"min": 0, "step": .1},
        scope=Scope.settings
    )

    def has_dynamic_children(self):
        """Do we dynamically determine our children? No, we don't have any.
        """
        return False

    def max_score(self):
        """The maximum raw score of our problem.
        """
        return 1
Пример #17
0
class MRQBlock(SubmittingXBlockMixin, StudentViewUserStateMixin,
               QuestionnaireAbstractBlock, ExpandStaticURLMixin):
    """
    An XBlock used to ask multiple-response questions.

    The student selects any subset of the available choices; each choice is
    "completed" when its selected/unselected state matches the
    required/ignored configuration below, and the score is the fraction of
    completed choices.
    """
    CATEGORY = 'pb-mrq'
    STUDIO_LABEL = _("Multiple Response Question")
    # User-state fields exposed through StudentViewUserStateMixin.
    USER_STATE_FIELDS = [
        'student_choices',
    ]

    # Last submission made by the student (list of selected choice values).
    student_choices = List(
        default=[],
        scope=Scope.user_state)
    required_choices = List(
        display_name=_("Required Choices"),
        help=
        _("Specify the value[s] that students must select for this MRQ to be considered correct."
          ),
        scope=Scope.content,
        list_values_provider=QuestionnaireAbstractBlock.choice_values_provider,
        list_style='set',  # Unordered, unique items. Affects the UI editor.
        default=[],
    )
    ignored_choices = List(
        display_name=_("Ignored Choices"),
        help=_(
            "Specify the value[s] that are neither correct nor incorrect. "
            "Any values not listed as required or ignored will be considered wrong."
        ),
        scope=Scope.content,
        list_values_provider=QuestionnaireAbstractBlock.choice_values_provider,
        list_style='set',  # Unordered, unique items. Affects the UI editor.
        default=[],
    )
    # General feedback shown after every submission.
    message = String(display_name=_("Message"),
                     help=_("General feedback provided when submitting"),
                     scope=Scope.content,
                     default="")
    # When True, per-choice correctness and tips are withheld from responses.
    hide_results = Boolean(display_name="Hide results",
                           scope=Scope.content,
                           default=False)
    editable_fields = (
        'question',
        'required_choices',
        'ignored_choices',
        'message',
        'display_name',
        'show_title',
        'weight',
        'hide_results',
    )

    def describe_choice_correctness(self, choice_value):
        """Return a human-readable label for how `choice_value` is graded."""
        if choice_value in self.required_choices:
            return self._("Required")
        elif choice_value in self.ignored_choices:
            return self._("Ignored")
        return self._("Not Acceptable")

    def get_results(self, previous_result):
        """
        Get the results a student has already submitted.

        `previous_result` is a dict with a 'submissions' key holding the
        student's selected choice values.
        """
        result = self.calculate_results(previous_result['submissions'])
        result['completed'] = True
        return result

    def get_last_result(self):
        """Return the graded result for the last submission, or {} if none."""
        if self.student_choices:
            return self.get_results({'submissions': self.student_choices})
        else:
            return {}

    def submit(self, submissions):
        """Grade `submissions` and persist them as the student's last choices."""
        log.debug('Received MRQ submissions: "%s"', submissions)

        result = self.calculate_results(submissions)
        self.student_choices = submissions

        log.debug('MRQ submissions result: %s', result)
        return result

    def calculate_results(self, submissions):
        """
        Grade `submissions` (list of selected choice values) against the
        required/ignored configuration.

        Returns a dict with 'status' ('correct'/'partial'/'incorrect'),
        per-choice 'choices' results, the normalized 'score', the feedback
        'message' and the block 'weight'.
        """
        score = 0
        results = []
        tips = None
        loader = None

        if not self.hide_results:
            tips = self.get_tips()
            # Loop-invariant: create the template loader once, not per choice.
            loader = ResourceLoader(__name__)

        for choice in self.custom_choices:
            choice_completed = True
            choice_tips_html = []
            choice_selected = choice.value in submissions

            # A required choice must be selected; any other selected choice is
            # wrong unless it is explicitly ignored.
            if choice.value in self.required_choices:
                if not choice_selected:
                    choice_completed = False
            elif choice_selected and choice.value not in self.ignored_choices:
                choice_completed = False

            if choice_completed:
                score += 1

            choice_result = {
                'value': choice.value,
                'selected': choice_selected,
                'content': choice.content
            }
            # Only include tips/results in returned response if we want to display them
            if not self.hide_results:
                # Find the (first) tip that applies to this choice.
                for tip in tips:
                    if choice.value in tip.values:
                        choice_tips_html.append(
                            tip.render('mentoring_view').content)
                        break

                choice_result['completed'] = choice_completed
                choice_result['tips'] = loader.render_django_template(
                    'templates/html/tip_choice_group.html', {
                        'tips_html': choice_tips_html,
                    })

            results.append(choice_result)

        status = 'incorrect' if score <= 0 else 'correct' if score >= len(
            results) else 'partial'

        if sub_api:
            # Send the answer as a concatenated list to the submissions API
            answer = [
                choice['content'] for choice in results if choice['selected']
            ]
            sub_api.create_submission(self.student_item_key, ', '.join(answer))

        return {
            'submissions': submissions,
            'status': status,
            'choices': results,
            'message': self.message_formatted,
            'weight': self.weight,
            'score': (float(score) / len(results)) if results else 0,
        }

    def validate_field_data(self, validation, data):
        """
        Validate this block's field data.

        Flags duplicate required/ignored choices, choices listed as both
        required and ignored, and choices that do not exist.
        """
        super().validate_field_data(validation, data)

        def add_error(msg):
            validation.add(ValidationMessage(ValidationMessage.ERROR, msg))

        def choice_name(choice_value):
            # Map a raw choice value to its display name, when known.
            for choice in self.human_readable_choices:
                if choice["value"] == choice_value:
                    return choice["display_name"]
            return choice_value

        all_values = set(self.all_choice_values)
        required = set(data.required_choices)
        ignored = set(data.ignored_choices)

        if len(required) < len(data.required_choices):
            add_error(self._("Duplicate required choices set"))
        if len(ignored) < len(data.ignored_choices):
            add_error(self._("Duplicate ignored choices set"))
        for val in required.intersection(ignored):
            add_error(
                self._("A choice is listed as both required and ignored: {}").
                format(choice_name(val)))
        for val in (required - all_values):
            add_error(
                self._("A choice value listed as required does not exist: {}").
                format(choice_name(val)))
        for val in (ignored - all_values):
            add_error(
                self._("A choice value listed as ignored does not exist: {}").
                format(choice_name(val)))

    def student_view_data(self, context=None):
        """
        Returns a JSON representation of the student_view of this XBlock,
        retrievable from the Course Block API.
        """
        return {
            'id': self.name,
            'block_id': str(self.scope_ids.usage_id),
            'display_name': self.display_name,
            'title': self.display_name,
            'type': self.CATEGORY,
            'weight': self.weight,
            'question': self.expand_static_url(self.question),
            'message': self.message,
            'choices': [{
                'value': choice['value'],
                'content': self.expand_static_url(choice['display_name'])
            } for choice in self.human_readable_choices],
            'hide_results': self.hide_results,
            'tips': [tip.student_view_data() for tip in self.get_tips()]
                    if not self.hide_results else [],
        }
Пример #18
0
class TemplateBuilderContainerBlock(StudioContainerWithNestedXBlocksMixin, XBlock, StudioEditableXBlockMixin):
    """
    Container XBlock for problem-template-builder child blocks.

    Renders every child block's student view. The `library_mode`, `count`
    and `random_samples` fields belong to a random-sampling ("library")
    feature that is currently disabled in `student_view`.
    """
    display_name = String(
        display_name=_("Title"),
        help=_("This block is a container of problem template builder xblocks developed by GCS"),
        scope=Scope.settings,
        default=_("GCS Problem Builder"),
    )

    library_mode = Boolean(
        display_name=_("Library Mode"),
        default=False,
        help=_("If True, use this container xBlock as a library, i.e. it will randomly pick the number of components set at field 'Count' and render to students."),
        scope=Scope.settings,
    )

    count = Integer(
        display_name=_("Count"),
        default=1,
        help=_("Enter the number of components to display to each student."),
        scope=Scope.settings,
    )

    # Child usage ids sampled for this student when library mode is active.
    # Currently unused because sampling is disabled in `student_view`.
    random_samples = List(
        default=[],
        scope=Scope.user_state,
    )

    editable_fields = ('display_name', 'library_mode', 'count')

    @property
    def allowed_nested_blocks(self):
        """Return the XBlock types that may be nested inside this container."""
        return [MathProblemTemplateBuilderXBlock]

    def validate_field_data(self, validation, data):
        """
        Ask this xblock to validate itself.

        Subclasses overriding this method should call super() to collect
        validation results from the superclass before adding their own.
        """
        super(TemplateBuilderContainerBlock, self).validate_field_data(validation, data)

    def author_edit_view(self, context=None):
        """Studio editing view; delegates to the mixin implementation."""
        return super(TemplateBuilderContainerBlock, self).author_edit_view(context)

    def student_view(self, context):
        """
        Render all child blocks' student views inside this container.
        """
        children_contents = []
        fragment = Fragment()

        # NOTE(review): random sampling driven by `library_mode` / `count`
        # was removed here — all children are always rendered.
        for child_id in self.children:
            child = self.runtime.get_block(child_id)
            child_fragment = self._render_child_fragment(child, context, 'student_view')
            fragment.add_frag_resources(child_fragment)
            children_contents.append(child_fragment.content)

        render_context = {
            'block': self,
            'children_contents': children_contents
        }
        render_context.update(context)
        fragment.add_content(self.loader.render_template(self.CHILD_PREVIEW_TEMPLATE, render_context))

        return fragment
Пример #19
0
class Question():
    """
    XBlock-style field definitions and views for a single quiz question.

    NOTE(review): parts of this class look copied from a tip block:
    ``validate_field_data`` calls ``super(TipBlock, self)``, and several
    methods read fields (``values``, ``content``, ``width``, ``height``)
    that are not declared on this class. It also relies on Python 2-only
    ``unicode``. Confirm intended behavior before reusing this code.
    """

    # Question prompt shown to the student.
    statement = String(display_name=_("Statement"),
                       help=_("Statement of the question"),
                       scope=Scope.content,
                       default="")

    # Question kind (e.g. video/image); note this shadows the builtin `type`.
    type = String(display_name=_("QuestionType"),
                  help=_("Type of the question"),
                  scope=Scope.content,
                  default="")

    # Media URL used by Video or Image question types.
    url = String(display_name=_("Url"),
                 help=_("Url of question for Video or Image types"),
                 scope=Scope.content,
                 default="")

    # Candidate answers for this question.
    answers = List(
        display_name=_("Questions"),
        help=_("List of Questions for this tournament"),
        scope=Scope.content,
        default=[],
        list_style='set',
    )

    # Presumably the index of the correct answer — TODO confirm; the help
    # text says "Number of points", which does not match the name.
    correctAnswer = Integer(
        default=0,
        scope=Scope.user,
        help="Number of points",
    )

    @property
    def display_name_with_default(self):
        # Build a short human-readable name from the parent's choices that
        # this block refers to (assumes the parent exposes
        # `human_readable_choices` and self has a `values` field — confirm).
        values_list = []
        for entry in self.get_parent().human_readable_choices:
            if entry["value"] in self.values:
                display_name = strip_tags(
                    entry["display_name"]
                )  # Studio studio_view can't handle html in display_name
                if len(display_name) > 20:
                    display_name = display_name[:20] + u'…'
                values_list.append(display_name)
        return self._(u"Tip for {list_of_choices}").format(
            list_of_choices=u", ".join(values_list))

    def mentoring_view(self, context=None):
        """ Render this XBlock within a mentoring block. """
        html = loader.render_template("templates/html/tip.html", {
            'content': self.content,
            'width': self.width,
            'height': self.height,
        })
        return Fragment(html)

    def student_view(self, context=None):
        """ Normal view of this XBlock, identical to mentoring_view """
        return self.mentoring_view(context)

    def clean_studio_edits(self, data):
        """
        Clean up the edits during studio_view save
        """
        # Deduplicates values; `unicode` is Python 2-only — NOTE(review).
        if "values" in data:
            data["values"] = list([unicode(v) for v in set(data["values"])])

    def validate_field_data(self, validation, data):
        """
        Validate this block's field data.
        """
        # NOTE(review): calls super with `TipBlock`, not `Question` — this
        # only works if the class is actually used as TipBlock; confirm.
        super(TipBlock, self).validate_field_data(validation, data)

        def add_error(msg):
            validation.add(ValidationMessage(ValidationMessage.ERROR, msg))

        try:
            valid_values = set(self.get_parent().all_choice_values)
        except Exception:
            # Best effort: the parent may be unavailable outside a runtime.
            pass
        else:
            for dummy in set(data.values) - valid_values:
                add_error(
                    self._(u"A choice selected for this tip does not exist."))

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        """
        Construct this XBlock from the given XML node.
        """
        block = runtime.construct_xblock_from_class(cls, keys)

        block.values = cls.values.from_string(node.get('values', '[]'))
        block.width = node.get('width', '')
        block.height = node.get('height', '')

        # Python 2-style string handling (`unicode`) — NOTE(review).
        block.content = unicode(node.text or u"")
        for child in node:
            block.content += etree.tostring(child, encoding='unicode')

        return block
Пример #20
0
class QuizBlock(ResourceMixin, QuizResultMixin, ExportDataBlock,
                XBlockWithTranslationServiceMixin):
    """
    An XBlock which can be used to add diagnostic quiz
    """
    BUZZFEED_QUIZ_VALUE = "BFQ"
    BUZZFEED_QUIZ_LABEL = _("BuzzFeed-style")
    DIAGNOSTIC_QUIZ_VALUE = "DG"
    DIAGNOSTIC_QUIZ_LABEL = _("Diagnostic-style")
    DEFAULT_GROUP = _('Default Group')

    display_name = String(
        display_name=_("Diagnostic Feedback"),
        help=_("This name appears in the horizontal navigation at the top of the page."),
        scope=Scope.settings,
        default="")

    title = String(default='', scope=Scope.content, help=_("Title of quiz"))

    description = String(default="",
                         scope=Scope.content,
                         help=_("Description of quiz"))

    questions = List(
        default=[],
        help=_("This will hold list of question with respective choices"),
        scope=Scope.content,
    )

    student_choices = Dict(
        default={},
        help=_("This will hold user provided answers of questions"),
        scope=Scope.user_state,
    )

    quiz_type = String(default="", scope=Scope.content, help=_("Type of quiz"))

    results = List(default=[], scope=Scope.content, help=_("List of results"))

    student_result = String(default='',
                            scope=Scope.user_state,
                            help=_("Calculated feedback of each user"))

    # Available quiz types, exposed to the studio editor.
    types = List(
        default=[
            {
                "value": BUZZFEED_QUIZ_VALUE,
                "label": BUZZFEED_QUIZ_LABEL
            },
            {
                "value": DIAGNOSTIC_QUIZ_VALUE,
                "label": DIAGNOSTIC_QUIZ_LABEL
            },
        ],
        scope=Scope.content,
        help=_("List of results"))

    groups = List(default=[DEFAULT_GROUP],
                  scope=Scope.content,
                  help=_("List of results"))

    current_step = Integer(
        default=0,
        scope=Scope.user_state,
        help=_("To control which question should be shown to student"))

    weight = Float(display_name=_("Weight"),
                   help=_("Defines the maximum total grade of this question."),
                   default=1,
                   scope=Scope.content,
                   enforce_type=True)

    completed = Boolean(default=False,
                        scope=Scope.user_state,
                        help=_("Has the student completed this quiz"))

    @property
    def display_name_with_default(self):
        """Use the quiz title as the display name shown in the LMS."""
        return self.title

    @property
    def additional_publish_event_data(self):
        """Extra fields attached to every published analytics event."""
        return {
            'user_id': self.scope_ids.user_id,
            'block_id': self.get_block_id(),
            'component_id': self.scope_ids.usage_id
        }

    # This block reports a grade to the LMS.
    has_score = True

    def get_fragment(self, context, view='studio', json_args=None):
        """
        Return a fragment with all required css/js/html loaded, for either
        the studio or the student view.

        :param context: context for templates
        :param view: view type, i.e. 'studio' or 'student'
        :param json_args: arguments passed to the view's JS initializer
        :return: fragment after loading all assets
        """
        fragment = Fragment()
        self.add_templates(fragment, context, view)
        self.add_css(fragment, view)
        self.add_js(fragment, view)
        self.initialize_js_classes(fragment, view, json_args)
        return fragment

    def append_choice(self, questions):
        """
        Attach the student's saved choice to each question, in place,
        when one is available.

        :param questions: list of questions
        """
        for question in questions:
            if self.quiz_type == self.DIAGNOSTIC_QUIZ_VALUE:
                # Diagnostic answers are numeric; convert the stored string.
                question['student_choice'] = float(self.student_choices.get(question['id'])) if \
                    self.student_choices.get(question['id']) else ''
            else:
                question['student_choice'] = self.student_choices.get(
                    question['id'], '')

    def get_block_id(self):
        """
        Return ID of `block`
        """
        usage_id = self.scope_ids.usage_id
        # Try accessing block ID. If usage_id does not have it, return usage_id itself
        return six.text_type(getattr(usage_id, 'block_id', usage_id))

    def get_question(self, question_id):
        """
        Return the question dict with the given id, or {} if not found.
        """
        # Bug fix: the previous version returned the *last* question when no
        # id matched, because the loop variable shadowed the {} sentinel.
        for question in self.questions:
            if question['id'] == question_id:
                return question
        return {}

    def get_buzzfeed_answer(self, choices, student_choice):
        """
        Return buzzfeed quiz answer label from question choices using student choice
        """
        choice_name = ''
        for choice in choices:
            if choice['category_id'] == student_choice:
                choice_name = choice['name']
                break

        return choice_name

    def get_diagnostic_answer(self, choices, student_choice):
        """
        Return diagnostic quiz answer label from question choices using student choice
        """
        choice_name = ''
        for choice in choices:
            if str(choice['value']) == student_choice:
                choice_name = choice['name']
                break

        return choice_name

    @XBlock.supports("multi_device")  # Mark as mobile-friendly
    def student_view(self, context=None):
        """
        Load the student view.

        :param context: context
        :return: fragment
        """
        context = {
            'questions': copy.deepcopy(self.questions),
            'self': self,
            'block_id': "xblock-{}".format(self.get_block_id()),
            'user_is_staff': self.user_is_staff()
        }

        if self.student_choices:
            self.append_choice(context['questions'])

        # return final result to show if user already completed the quiz
        if self.questions and self.current_step:
            if len(self.questions) == self.current_step:
                context['result'] = self.get_result()

        return self.get_fragment(context, 'student', {
            'quiz_type': self.quiz_type,
            'quiz_title': self.title
        })

    def student_view_data(self, context=None):
        """
        Returns a JSON representation of the Diagnostic Feedback Xblock, that
        can be retrieved using Course Block API.
        """
        return {
            'quiz_type': self.quiz_type,
            'quiz_title': self.title,
            'questions': self.questions,
            'description': self.description,
        }

    @XBlock.handler
    def student_view_user_state(self, data, suffix=''):
        """
        Returns a JSON representation of the student data for Diagnostic Feedback Xblock
        """
        response = {
            'student_choices': self.student_choices,
            'student_result': self.student_result,
            'current_step': self.current_step,
            'completed': self.completed,
        }

        return Response(json.dumps(response),
                        content_type='application/json',
                        charset='utf8')

    def get_attached_groups(self):
        """Return the distinct groups already attached to any result."""
        groups = []
        for r in self.results:
            if r['group'] not in groups:
                groups.append(r['group'])

        return groups

    def studio_view(self, context):
        """
        Load the studio (editing) view.

        :param context: context
        :return: fragment
        """
        block_id = "xblock-{}".format(self.get_block_id())
        course_key = getattr(self.scope_ids.usage_id, 'course_key', None)

        context['self'] = self
        context['block_id'] = block_id

        try:
            from xmodule.contentstore.content import StaticContent
            base_asset_url = StaticContent.get_base_url_path_for_course_assets(
                course_key)
        except Exception:
            # Outside the LMS/Studio runtime the xmodule import fails;
            # fall back to no asset prefix.
            base_asset_url = ''

        return self.get_fragment(
            context, 'studio', {
                'base_asset_url': base_asset_url,
                'quiz_type': self.quiz_type,
                'block_id': block_id,
                'results': self.results,
                'BUZZFEED_QUIZ_VALUE': self.BUZZFEED_QUIZ_VALUE,
                'DIAGNOSTIC_QUIZ_VALUE': self.DIAGNOSTIC_QUIZ_VALUE,
                'DEFAULT_GROUP': self.DEFAULT_GROUP,
                'questions': self.questions,
                'groups': self.groups,
                'attachedGroups': self.get_attached_groups(),
                'categoryTpl': loader.load_unicode('templates/underscore/category.html'),
                'rangeTpl': loader.load_unicode('templates/underscore/range.html'),
                'questionTpl': loader.load_unicode('templates/underscore/question.html'),
                'choiceTpl': loader.load_unicode('templates/underscore/choice.html')
            })

    @XBlock.json_handler
    def save_data(self, data, suffix=''):
        """
        Ajax handler to save data after applying required validation & filtration.

        :param data: step data to save
        :param suffix:
        :return: response dict
        """
        success = True
        response_message = ""
        step = data.get('step', '')

        if not step:
            success = False
            response_message = self._('missing step number')
        else:
            try:
                is_valid_data, response_message = Validator.validate(
                    self, data)
                if is_valid_data:
                    response_message = MainHelper.save_filtered_data(
                        self, data)
                else:
                    success = False

            except Exception as ex:
                success = False
                # Bug fix: exceptions have no `.message` attribute on
                # Python 3; str(ex) is the portable spelling.
                response_message += str(ex)

        return {'step': step, 'success': success, 'msg': response_message}

    @XBlock.json_handler
    def save_choice(self, data, suffix=''):
        """
        Save a student's choice for a question, after validation.

        :param data: answer data
        :param suffix:
        :return: response dict
        """
        # Import is placed here to avoid model import at project startup.
        try:
            from submissions import api as submissions_api
        except ImportError:
            log.info("Cannot import submissions_api")
            submissions_api = None

        student_result = ""
        response_message = ""

        try:
            success, response_message = Validator.validate_student_answer(
                self, data)
            if success:
                question_id = data['question_id']

                # save student answer
                self.student_choices[question_id] = data['student_choice']
                if (self.current_step) < int(data['currentStep']):
                    self.current_step = int(data['currentStep'])

                # calculate feedback result if user answering last question
                if data['isLast']:
                    student_result = self.get_result()

                    if not self.completed:
                        # Save the latest score and make quiz completed
                        self.runtime.publish(self, 'grade', {
                            'value': 1.0,
                            'max_value': 1.0
                        })
                        self.completed = True

                    if submissions_api:
                        log.info("have sub_api instance")
                        # Also send to the submissions API:
                        item_key = self.student_item_key
                        item_key['item_id'] = self.get_block_id()
                        submission_data = self.create_submission_data()
                        submission_data['final_result'] = student_result
                        submissions_api.create_submission(
                            item_key, json.dumps(submission_data))

                response_message = self._("Your response is saved")
        except Exception as ex:
            success = False
            response_message += str(ex)
        return {
            'success': success,
            'student_result': student_result,
            'response_msg': response_message
        }

    @XBlock.json_handler
    def start_over_quiz(self, data, suffix=''):
        """
        Reset student_choices, student_result and current_step for the
        current user.

        :param data: empty dict
        :param suffix:
        :return: response dict
        """
        success = True
        response_message = self._("student data cleared")

        self.student_choices = {}
        self.student_result = ""
        self.current_step = 0

        return {'success': success, 'msg': response_message}

    @XBlock.json_handler
    def add_group(self, data, suffix=''):
        """
        Add new group in self.groups list
        """
        success = True
        grp_name = data.get('name', '')
        if grp_name not in self.groups:
            msg = self._('Group added successfully.')
            self.groups.append(grp_name)

        else:
            msg = self._('Group already exist.')
            success = False

        return {'success': success, 'msg': msg, 'group_name': grp_name}

    @XBlock.json_handler
    def publish_event(self, data, suffix=''):
        """
        Publish data for analytics purposes
        """
        event_type = data.pop('event_type')
        # NOTE(review): naive local time; consider a timezone-aware UTC
        # timestamp for analytics consistency.
        data['time'] = datetime.now()

        self.runtime.publish(self, event_type, data)
        return {'result': 'ok'}

    def create_submission_data(self):
        """
        Return a complete submission data as quiz completed
        """
        submission = {}
        for question in self.questions:
            question_id = question['id']
            question_data = self.get_question(question_id)
            if self.quiz_type == self.BUZZFEED_QUIZ_VALUE:
                question_answer = self.get_buzzfeed_answer(
                    question_data['choices'],
                    self.student_choices[question_id])
            else:
                question_answer = self.get_diagnostic_answer(
                    question_data['choices'],
                    self.student_choices[question_id])

            submission[question_id] = {
                'question_text': question['text'],
                'answer': question_answer
            }

        return submission
Пример #21
0
class StudioMixin:
    """
    Studio editing view for OpenAssessment XBlock.
    """

    # A rubric must always contain at least one criterion with one option;
    # this blank structure seeds the editor when no criteria exist yet.
    DEFAULT_CRITERIA = [{
        'label': '',
        'options': [
            {
                'label': ''
            },
        ]
    }]

    # Lazily-translated labels for the "necessity" dropdowns in the editor
    # (used for the text / file-upload response requirement settings).
    NECESSITY_OPTIONS = {
        "required": ugettext_lazy("Required"),
        "optional": ugettext_lazy("Optional"),
        "": ugettext_lazy("None")
    }

    STUDIO_EDITING_TEMPLATE = 'openassessmentblock/edit/oa_edit.html'

    # Deep copy so that later mutation cannot alter the shared default order.
    BASE_EDITOR_ASSESSMENTS_ORDER = copy.deepcopy(
        DEFAULT_EDITOR_ASSESSMENTS_ORDER)

    # Since the XBlock problem definition contains only assessment
    # modules that are enabled, we need to keep track of the order
    # that the user left assessments in the editor, including
    # the ones that were disabled.  This allows us to keep the order
    # that the user specified.
    editor_assessments_order = List(
        default=DEFAULT_EDITOR_ASSESSMENTS_ORDER,
        scope=Scope.content,
        help="The order to display assessments in the editor.")

    def studio_view(self, context=None):  # pylint: disable=unused-argument
        """
        Render the OpenAssessment XBlock for editing in Studio.

        Args:
            context: Not actively used for this view.

        Returns:
            (Fragment): An HTML fragment for editing the configuration of this XBlock.
        """
        rendered_template = get_template(self.STUDIO_EDITING_TEMPLATE).render(
            self.editor_context())
        fragment = Fragment(rendered_template)

        # TODO: switch to add_javascript_url once XBlock resources are loaded from the CDN
        js_bytes = pkg_resources.resource_string(
            __name__, "static/js/openassessment-studio.js")
        fragment.add_javascript(js_bytes.decode('utf-8'))

        # Constants the client-side editor needs for file-upload validation.
        js_context_dict = {
            "ALLOWED_IMAGE_EXTENSIONS": self.ALLOWED_IMAGE_EXTENSIONS,
            "ALLOWED_FILE_EXTENSIONS": self.ALLOWED_FILE_EXTENSIONS,
            "FILE_EXT_BLACK_LIST": self.FILE_EXT_BLACK_LIST,
        }
        fragment.initialize_js('OpenAssessmentEditor', js_context_dict)
        return fragment

    def editor_context(self):
        """
        Update the XBlock's XML.

        Returns:
            dict with keys
                'rubric' (unicode), 'prompt' (unicode), 'title' (unicode),
                'submission_start' (unicode),  'submission_due' (unicode),
                'assessments (dict)

        """
        # In the authoring GUI, date and time fields should never be null.
        # Therefore, we need to resolve all "default" dates to datetime objects
        # before displaying them in the editor.
        __, __, date_ranges = resolve_dates(  # pylint: disable=redeclared-assigned-name
            self.start, self.due,
            [(self.submission_start, self.submission_due)] +
            [(asmnt.get('start'), asmnt.get('due'))
             for asmnt in self.valid_assessments], self._)

        # The first resolved range is the submission window; the rest map
        # one-to-one onto the valid assessments (same order as passed above).
        submission_start, submission_due = date_ranges[0]
        assessments = self._assessments_editor_context(date_ranges[1:])
        self.editor_assessments_order = self._editor_assessments_order_context(
        )

        # Every rubric requires one criterion. If there is no criteria
        # configured for the XBlock, return one empty default criterion, with
        # an empty default option.
        criteria = copy.deepcopy(self.rubric_criteria_with_labels)
        if not criteria:
            criteria = self.DEFAULT_CRITERIA

        # To maintain backwards compatibility, if there is no
        # feedback_default_text configured for the xblock, use the default text
        feedback_default_text = copy.deepcopy(
            self.rubric_feedback_default_text)
        if not feedback_default_text:
            feedback_default_text = DEFAULT_RUBRIC_FEEDBACK_TEXT
        course_id = self.location.course_key if hasattr(self,
                                                        'location') else None

        # If allowed file types haven't been explicitly set, load from a preset
        white_listed_file_types = self.get_allowed_file_types_or_preset()
        white_listed_file_types_string = ','.join(
            white_listed_file_types) if white_listed_file_types else ''

        return {
            'prompts':
            self.prompts,
            'prompts_type':
            self.prompts_type,
            'title':
            self.title,
            'submission_due':
            submission_due,
            'submission_start':
            submission_start,
            'assessments':
            assessments,
            'criteria':
            criteria,
            'feedbackprompt':
            self.rubric_feedback_prompt,
            'feedback_default_text':
            feedback_default_text,
            'text_response':
            self.text_response if self.text_response else '',
            'file_upload_response':
            self.file_upload_response if self.file_upload_response else '',
            'necessity_options':
            self.NECESSITY_OPTIONS,
            'file_upload_type':
            self.file_upload_type,
            'allow_multiple_files':
            self.allow_multiple_files,
            'white_listed_file_types':
            white_listed_file_types_string,
            'allow_latex':
            self.allow_latex,
            'leaderboard_show':
            self.leaderboard_show,
            'editor_assessments_order': [
                make_django_template_key(asmnt)
                for asmnt in self.editor_assessments_order
            ],
            'teams_feature_enabled':
            self.team_submissions_enabled,
            'teams_enabled':
            self.teams_enabled,
            'base_asset_url':
            self._get_base_url_path_for_course_assets(course_id),
            'is_released':
            self.is_released(),
            'teamsets':
            self.get_teamsets(course_id),
            'selected_teamset_id':
            self.selected_teamset_id,
        }

    @XBlock.json_handler
    def update_editor_context(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Update the XBlock's configuration.

        Args:
            data (dict): Data from the request; should have the format described
            in the editor schema.

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """

        # Validate and sanitize the data using a schema
        # If the data is invalid, this means something is wrong with
        # our JavaScript, so we log an exception.
        try:
            data = EDITOR_UPDATE_SCHEMA(data)
        except MultipleInvalid:
            logger.exception('Editor context is invalid')
            return {
                'success': False,
                'msg': self._('Error updating XBlock configuration')
            }

        # Check that the editor assessment order contains all the assessments.
        current_order = set(data['editor_assessments_order'])
        if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != current_order:
            # Backwards compatibility: "staff-assessment" may not be present.
            # If that is the only problem with this data, just add it manually and continue.
            if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) == current_order | {
                    'staff-assessment'
            }:
                data['editor_assessments_order'].append('staff-assessment')
                logger.info(
                    'Backwards compatibility: editor_assessments_order now contains staff-assessment'
                )
            else:
                logger.exception(
                    'editor_assessments_order does not contain all expected assessment types'
                )
                return {
                    'success': False,
                    'msg': self._('Error updating XBlock configuration')
                }

        # Cross-field validation: at least one of text / file-upload responses
        # must be enabled, and whichever remains alone must be required.
        if not data['text_response'] and not data['file_upload_response']:
            return {
                'success':
                False,
                'msg':
                self.
                _("Error: both text and file upload responses can't be disabled"
                  )
            }
        if not data['text_response'] and data[
                'file_upload_response'] == 'optional':
            return {
                'success':
                False,
                'msg':
                self.
                _("Error: in case if text response is disabled file upload response must be required"
                  )
            }
        if not data['file_upload_response'] and data[
                'text_response'] == 'optional':
            return {
                'success':
                False,
                'msg':
                self.
                _("Error: in case if file upload response is disabled text response must be required"
                  )
            }

        # Backwards compatibility: We used to treat "name" as both a user-facing label
        # and a unique identifier for criteria and options.
        # Now we treat "name" as a unique identifier, and we've added an additional "label"
        # field that we display to the user.
        # If the JavaScript editor sends us a criterion or option without a "name"
        # field, we should assign it a unique identifier.
        for criterion in data['criteria']:
            if 'name' not in criterion:
                criterion['name'] = uuid4().hex
            for option in criterion['options']:
                if 'name' not in option:
                    option['name'] = uuid4().hex

        xblock_validator = validator(self, self._)
        success, msg = xblock_validator(
            create_rubric_dict(data['prompts'], data['criteria']),
            data['assessments'],
            submission_start=data['submission_start'],
            submission_due=data['submission_due'],
            leaderboard_show=data['leaderboard_show'])
        if not success:
            return {
                'success': False,
                'msg': self._(u'Validation error: {error}').format(error=msg)
            }

        # At this point, all the input data has been validated,
        # so we can safely modify the XBlock fields.
        self.title = data['title']
        self.display_name = data['title']
        self.prompts = data['prompts']
        self.prompts_type = data['prompts_type']
        self.rubric_criteria = data['criteria']
        self.rubric_assessments = data['assessments']
        self.editor_assessments_order = data['editor_assessments_order']
        self.rubric_feedback_prompt = data['feedback_prompt']
        self.rubric_feedback_default_text = data['feedback_default_text']
        self.submission_start = data['submission_start']
        self.submission_due = data['submission_due']
        self.text_response = data['text_response']
        self.file_upload_response = data['file_upload_response']
        # File-upload settings only make sense when uploads are enabled;
        # otherwise reset them so stale values don't linger.
        if data['file_upload_response']:
            self.file_upload_type = data['file_upload_type']
            self.white_listed_file_types_string = data[
                'white_listed_file_types']
        else:
            self.file_upload_type = None
            self.white_listed_file_types_string = None
        self.allow_multiple_files = bool(data['allow_multiple_files'])
        self.allow_latex = bool(data['allow_latex'])
        self.leaderboard_show = data['leaderboard_show']
        self.teams_enabled = bool(data.get('teams_enabled', False))
        self.selected_teamset_id = data.get('selected_teamset_id', '')

        return {
            'success': True,
            'msg': self._(u'Successfully updated OpenAssessment XBlock')
        }

    @XBlock.json_handler
    def check_released(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Check whether the problem has been released.

        Args:
            data (dict): Not used

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool), 'message' (unicode), and 'is_released' (bool)
        """
        # There aren't currently any server-side error conditions we report to the client,
        # but we send success/msg values anyway for consistency with other handlers.
        return {'success': True, 'msg': u'', 'is_released': self.is_released()}

    def _assessments_editor_context(self, assessment_dates):
        """
        Transform the rubric assessments list into the context
        we will pass to the Django template.

        Args:
            assessment_dates: List of assessment date ranges (tuples of start/end datetimes).

        Returns:
            dict

        """
        assessments = {}
        for asmnt, date_range in zip(self.rubric_assessments,
                                     assessment_dates):
            # Django Templates cannot handle dict keys with dashes, so we'll convert
            # the dashes to underscores.
            template_name = make_django_template_key(asmnt['name'])
            assessments[template_name] = copy.deepcopy(asmnt)
            assessments[template_name]['start'] = date_range[0]
            assessments[template_name]['due'] = date_range[1]

        # In addition to the data in the student training assessment, we need to include two additional
        # pieces of information: a blank context to render the empty template with, and the criteria
        # for each example (so we don't have any complicated logic within the template). Though this
        # could be accomplished within the template, we are opting to remove logic from the template.
        student_training_module = self.get_assessment_module(
            'student-training')

        # Blank example: one empty answer part per prompt.
        student_training_template = {
            'answer': {
                'parts': [{
                    'text': ''
                } for _ in self.prompts]
            }
        }
        criteria_list = copy.deepcopy(self.rubric_criteria_with_labels)
        for criterion in criteria_list:
            criterion['option_selected'] = ""
        student_training_template['criteria'] = criteria_list

        if student_training_module:
            student_training_module = update_assessments_format(
                [student_training_module])[0]
            example_list = []
            # Adds each example to a modified version of the student training module dictionary.
            for example in student_training_module['examples']:
                criteria_list = copy.deepcopy(self.rubric_criteria_with_labels)
                # Equivalent to a Join Query, this adds the selected option to the Criterion's dictionary, so that
                # it can be easily referenced in the template without searching through the selected options.
                for criterion in criteria_list:
                    for option_selected in example['options_selected']:
                        if option_selected['criterion'] == criterion['name']:
                            criterion['option_selected'] = option_selected[
                                'option']
                example_list.append({
                    'answer': example['answer'],
                    'criteria': criteria_list,
                })
            assessments['training'] = {
                'examples': example_list,
                'template': student_training_template
            }
        # If we don't have student training enabled, we still need to render a single (empty, or default) example
        else:
            assessments['training'] = {
                'examples': [student_training_template],
                'template': student_training_template
            }

        return assessments

    def _editor_assessments_order_context(self):
        """
        Create a list of assessment names in the order
        the user last set in the editor, including
        assessments that are not currently enabled.

        Returns:
            list of assessment names

        """
        # Start with the default order, to pick up any assessment types that have been added
        # since the user last saved their ordering.
        effective_order = copy.deepcopy(self.BASE_EDITOR_ASSESSMENTS_ORDER)

        # Account for changes the user has made to the default order
        user_order = copy.deepcopy(self.editor_assessments_order)
        effective_order = self._subset_in_relative_order(
            effective_order, user_order)

        # Account for inconsistencies between the user's order and the problems
        # that are currently enabled in the problem (These cannot be changed)
        enabled_assessments = [
            asmnt['name'] for asmnt in self.valid_assessments
        ]
        enabled_ordered_assessments = [
            assessment for assessment in enabled_assessments
            if assessment in user_order
        ]
        effective_order = self._subset_in_relative_order(
            effective_order, enabled_ordered_assessments)

        return effective_order

    def _subset_in_relative_order(self, superset, subset):
        """
        Returns a copy of superset, with entries that appear in subset being reordered to match
        their relative ordering in subset.
        """
        # Positions the subset's items occupy within superset; if those
        # positions are already sorted, the relative order matches and
        # superset is returned unchanged.
        superset_indices = [superset.index(item) for item in subset]
        sorted_superset_indices = sorted(superset_indices)
        if superset_indices != sorted_superset_indices:
            # Rewrite the occupied positions (in ascending order) with the
            # subset's items, preserving the positions of everything else.
            # NOTE: despite the docstring, this mutates `superset` in place.
            for index, superset_index in enumerate(sorted_superset_indices):
                superset[superset_index] = subset[index]
        return superset

    def _get_base_url_path_for_course_assets(self, course_key):
        """
        Returns base url path for course assets
        """
        if course_key is None:
            return None

        placeholder_id = uuid4().hex
        # create a dummy asset location with a fake but unique name. strip off the name, and return it
        url_path = str(
            course_key.make_asset_key('asset',
                                      placeholder_id).for_branch(None))
        if not url_path.startswith('/'):
            url_path = '/' + url_path
        return url_path.replace(placeholder_id, '')

    def get_team_configuration(self, course_id):
        """
        Returns a dict with team configuration settings.

        Returns None when the course has no team configuration.
        """
        configuration_service = self.runtime.service(self,
                                                     'teams_configuration')
        team_configuration = configuration_service.get_teams_configuration(
            course_id)
        if not team_configuration:
            return None
        return team_configuration

    def get_teamsets(self, course_id):
        """
        Wrapper around get_team_configuration that returns team names only for display
        """
        team_configuration = self.get_team_configuration(course_id)
        if not team_configuration:
            return None
        return team_configuration.teamsets
# Example #22
class VideoFields(object):
    """Field definitions shared by `VideoModule` and `VideoDescriptor`."""

    display_name = String(
        display_name="Display Name",
        help="Display name for this module.",
        scope=Scope.settings,
        default="Video",
    )
    # Last playback position, per user.
    position = Integer(
        help="Current position in the video",
        scope=Scope.user_state,
        default=0,
    )
    show_captions = Boolean(
        display_name="Show Transcript",
        help="This controls whether or not captions are shown by default.",
        scope=Scope.settings,
        default=True,
    )
    # TODO: This should be moved to Scope.content, but this will
    # require data migration to support the old video module.
    youtube_id_1_0 = String(
        display_name="Youtube ID",
        help="This is the Youtube ID reference for the normal speed video.",
        scope=Scope.settings,
        default="OEoXaMPEzfM",
    )
    # Alternate-speed variants are only needed by older browsers.
    youtube_id_0_75 = String(
        display_name="Youtube ID for .75x speed",
        help="Optional, for older browsers: the Youtube ID for the .75x speed video.",
        scope=Scope.settings,
        default="",
    )
    youtube_id_1_25 = String(
        display_name="Youtube ID for 1.25x speed",
        help="Optional, for older browsers: the Youtube ID for the 1.25x speed video.",
        scope=Scope.settings,
        default="",
    )
    youtube_id_1_5 = String(
        display_name="Youtube ID for 1.5x speed",
        help="Optional, for older browsers: the Youtube ID for the 1.5x speed video.",
        scope=Scope.settings,
        default="",
    )
    # Both times are datetime.timedelta objects; the front-end code of the
    # video player checks the logical validity of the (start_time, end_time)
    # pair.
    start_time = RelativeTime(
        display_name="Start Time",
        help="Start time for the video (HH:MM:SS).",
        scope=Scope.settings,
        default=datetime.timedelta(seconds=0),
    )
    end_time = RelativeTime(
        display_name="End Time",
        help="End time for the video (HH:MM:SS).",
        scope=Scope.settings,
        default=datetime.timedelta(seconds=0),
    )

    source = String(
        display_name="Download Video",
        help="The external URL to download the video. This appears as a link beneath the video.",
        scope=Scope.settings,
        default="",
    )
    html5_sources = List(
        display_name="Video Sources",
        help="A list of filenames to be used with HTML5 video. The first supported filetype will be displayed.",
        scope=Scope.settings,
    )
    track = String(
        display_name="Download Transcript",
        help="The external URL to download the timed transcript track. This appears as a link beneath the video.",
        scope=Scope.settings,
        default="",
    )
    sub = String(
        display_name="HTML5 Transcript",
        help="The name of the timed transcript track (for non-Youtube videos).",
        scope=Scope.settings,
        default="",
    )
# Example #23
class MentoringBlock(
    StudentViewUserStateResultsTransformerMixin,
    BaseMentoringBlock, StudioContainerWithNestedXBlocksMixin, StepParentMixin,
):
    """
    An XBlock providing mentoring capabilities

    Composed of text, answers input fields, and a set of MRQ/MCQ with advices.
    A set of conditions on the provided answers and MCQ/MRQ choices will determine if the
    student is a) provided mentoring advices and asked to alter his answer, or b) is given the
    ok to continue.
    """
    # Content
    # Fields persisted for each user; used by the user-state transformer mixin.
    USER_STATE_FIELDS = ['completed', 'num_attempts', 'student_results']
    followed_by = String(
        display_name=_("Followed by"),
        help=_("url_name of the step after the current mentoring block in workflow."),
        default=None,
        scope=Scope.content
    )
    enforce_dependency = Boolean(
        display_name=_("Enforce Dependency"),
        help=_("Should the next step be the current block to complete?"),
        default=False,
        scope=Scope.content,
        enforce_type=True
    )
    display_submit = Boolean(
        display_name=_("Show Submit Button"),
        help=_("Allow submission of the current block?"),
        default=True,
        scope=Scope.content,
        enforce_type=True
    )
    xml_content = String(
        display_name=_("XML content"),
        help=_("Not used for version 2. This field is here only to preserve the data needed to upgrade from v1 to v2."),
        default='',
        scope=Scope.content,
        multiline_editor=True
    )

    # Settings
    display_name = String(
        display_name=_("Title (Display name)"),
        help=_("Title to display"),
        default=_("Problem Builder"),
        scope=Scope.settings
    )
    feedback_label = String(
        display_name=_("Feedback Header"),
        help=_("Header for feedback messages"),
        default=_("Feedback"),
        scope=Scope.content
    )

    # User state
    attempted = Boolean(
        # Has the student attempted this mentoring step?
        default=False,
        scope=Scope.user_state
        # TODO: Does anything use this 'attempted' field? May want to delete it.
    )
    completed = Boolean(
        # Has the student completed this mentoring step?
        default=False,
        scope=Scope.user_state
    )
    step = Integer(
        # Keep track of the student assessment progress.
        default=0,
        scope=Scope.user_state,
        enforce_type=True
    )
    student_results = List(
        # Store results of student choices.
        default=[],
        scope=Scope.user_state
    )
    extended_feedback = Boolean(
        help=_("Show extended feedback details when all attempts are used up."),
        default=False,
        # Bug fix: this was `Scope=Scope.content` (capital S), which passed an
        # unrecognized keyword argument instead of setting the field's scope.
        scope=Scope.content
    )

    # Global user state
    next_step = String(
        # url_name of the next step the student must complete (global to all blocks)
        default='mentoring_first',
        scope=Scope.preferences
    )

    # Fields exposed in the Studio editor, in display order.
    editable_fields = (
        'display_name', 'followed_by', 'max_attempts', 'enforce_dependency',
        'display_submit', 'feedback_label', 'weight', 'extended_feedback'
    )

    @property
    def allowed_nested_blocks(self):
        """
        Returns a list of allowed nested XBlocks. Each item can be either
        * An XBlock class
        * A NestedXBlockSpec

        If XBlock class is used it is assumed that this XBlock is enabled and allows multiple instances.
        NestedXBlockSpec allows explicitly setting disabled/enabled state, disabled reason (if any) and single/multiple
        instances
        """
        additional_blocks = []
        # Each optional block type below is only offered when its package is
        # importable in this environment; an ImportError silently skips it.
        try:
            from xmodule.video_module.video_module import VideoDescriptor
            additional_blocks.append(NestedXBlockSpec(
                VideoDescriptor, category='video', label=_(u"Video")
            ))
        except ImportError:
            pass
        try:
            from imagemodal import ImageModal
            additional_blocks.append(NestedXBlockSpec(
                ImageModal, category='imagemodal', label=_(u"Image Modal")
            ))
        except ImportError:
            pass

        try:
            from xblock_django.models import XBlockConfiguration
            # pb-swipe is additionally gated by an XBlockConfiguration row:
            # offered only when such a row exists and is enabled.
            opt = XBlockConfiguration.objects.filter(name="pb-swipe")
            if opt.count() and opt.first().enabled:
                additional_blocks.append(SwipeBlock)
        except ImportError:
            pass

        try:
            from ooyala_player import OoyalaPlayerBlock
            additional_blocks.append(NestedXBlockSpec(
                OoyalaPlayerBlock, category='ooyala-player', label=_(u"Ooyala Player")
            ))
        except ImportError:
            pass

        # One "message" child spec per supported message type, pre-filled via
        # boilerplate so Studio offers them ready-labelled.
        message_block_shims = [
            NestedXBlockSpec(
                MentoringMessageBlock,
                category='pb-message',
                boilerplate=message_type,
                label=get_message_label(message_type),
            )
            for message_type in (
                'completed',
                'incomplete',
                'max_attempts_reached',
            )
        ]

        return [
            NestedXBlockSpec(AnswerBlock, boilerplate='studio_default'),
            MCQBlock, RatingBlock, MRQBlock, CompletionBlock,
            NestedXBlockSpec(None, category="html", label=self._("HTML")),
            AnswerRecapBlock, MentoringTableBlock, PlotBlock, SliderBlock
        ] + additional_blocks + message_block_shims

    def get_question_number(self, question_id):
        """
        Return the step number of the child question named `question_id`.

        Raises:
            ValueError: if no child question with that name exists.
        """
        blocks = (self.runtime.get_block(cid) for cid in self.children)
        for block in blocks:
            if isinstance(block, QuestionMixin) and block.name == question_id:
                return block.step_number
        raise ValueError("Question ID in answer set not a step of this Mentoring Block!")

    def answer_mapper(self, answer_status):
        """
        Build a JSON-dumpable list, with readable key names, of every student
        answer whose status equals `answer_status`.
        """
        mapped = []
        for question_name, details in self.student_results:
            if details['status'] != answer_status:
                continue
            try:
                number = self.get_question_number(question_name)
            except ValueError:
                # The question has been deleted since the student answered it.
                continue
            mapped.append({
                'number': number,
                'id': question_name,
                'details': details,
            })
        return mapped

    @property
    def score(self):
        """Compute the student score, weighting each step by its `weight` field."""
        steps = self.steps
        weight_total = sum(float(step.weight) for step in steps)
        # No weighted steps at all -> degenerate zero score.
        if weight_total == 0:
            return Score(0, 0, [], [], [])

        weights_by_name = {step.name: step.weight for step in steps}
        earned = 0
        for q_name, q_details in self.student_results:
            if q_name in weights_by_name:
                earned += q_details['score'] * weights_by_name[q_name]

        raw_score = earned / weight_total
        return Score(
            raw_score,
            int(round(raw_score * 100)),
            self.answer_mapper(CORRECT),
            self.answer_mapper(INCORRECT),
            self.answer_mapper(PARTIAL),
        )

    @XBlock.supports("multi_device")  # Mark as mobile-friendly
    def student_view(self, context):
        """
        Render this mentoring block (and all its visible children) for the LMS.

        Returns a Fragment combining the children's 'mentoring_view' output,
        the block's own template, CSS/JS resources, and client-side init.
        """
        from .questionnaire import QuestionnaireAbstractBlock  # Import here to avoid circular dependency

        # Migrate stored data if necessary
        self.migrate_fields()

        # Validate self.step:
        num_steps = len(self.steps)
        if self.step > num_steps:
            self.step = num_steps

        fragment = Fragment()
        child_content = u""

        mcq_hide_previous_answer = self.get_option('pb_mcq_hide_previous_answer')

        for child_id in self.children:
            child = self.runtime.get_block(child_id)
            if child is None:  # child should not be None but it can happen due to bugs or permission issues
                child_content += u"<p>[{}]</p>".format(self._(u"Error: Unable to load child component."))
            elif not isinstance(child, MentoringMessageBlock):
                # Message blocks are rendered client-side on demand, so they
                # are skipped here; everything else is concatenated in order.
                try:
                    if mcq_hide_previous_answer and isinstance(child, QuestionnaireAbstractBlock):
                        context['hide_prev_answer'] = True
                    else:
                        context['hide_prev_answer'] = False
                    child_fragment = child.render('mentoring_view', context)
                except NoSuchViewError:
                    if child.scope_ids.block_type == 'html' and getattr(self.runtime, 'is_author_mode', False):
                        # html block doesn't support mentoring_view, and if we use student_view Studio will wrap
                        # it in HTML that we don't want in the preview. So just render its HTML directly:
                        child_fragment = Fragment(child.data)
                    else:
                        child_fragment = child.render('student_view', context)
                fragment.add_frag_resources(child_fragment)
                child_content += child_fragment.content

        fragment.add_content(loader.render_template('templates/html/mentoring.html', {
            'self': self,
            'title': self.display_name,
            'show_title': self.show_title,
            'child_content': child_content,
            'missing_dependency_url': self.has_missing_dependency and self.next_step_url,
        }))
        fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/problem-builder.css'))
        fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/lms.css'))
        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/vendor/underscore-min.js'))
        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/util.js'))
        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/mentoring_standard_view.js'))
        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/mentoring.js'))
        fragment.add_resource(loader.load_unicode('templates/html/mentoring_attempts.underscore'), "text/html")

        # Workbench doesn't have font awesome, so add it:
        if WorkbenchRuntime and isinstance(self.runtime, WorkbenchRuntime):
            fragment.add_css_url('//maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css')

        fragment.initialize_js('MentoringBlock')

        # Blocks without a submit button can never be submitted, so report
        # progress immediately upon viewing.
        if not self.display_submit:
            self.runtime.publish(self, 'progress', {})

        return fragment

    def migrate_fields(self):
        """
        Upgrade legacy data stored in this block's fields to the current format.

        Older versions stored a boolean ``completed`` flag in each entry of
        ``self.student_results``; newer code expects a textual ``status`` value
        ('correct' / 'incorrect'), so convert the entries in place when needed.
        """
        needs_migration = bool(self.student_results) and 'completed' in self.student_results[0][1]
        if needs_migration:
            for _name, details in self.student_results:
                # Replace the boolean flag with its textual equivalent.
                details['status'] = 'correct' if details.pop('completed') else 'incorrect'

    @property
    def additional_publish_event_data(self):
        """Extra key/value pairs merged into events published by this block."""
        event_data = dict(
            user_id=self.scope_ids.user_id,
            component_id=self.url_name,
        )
        return event_data

    @property
    def has_missing_dependency(self):
        """
        True when the student still needs to complete a prerequisite step
        before being able to complete this one; False otherwise.
        """
        if not self.enforce_dependency:
            return False
        if self.completed:
            return False
        return self.next_step != self.url_name

    @property
    def next_step_url(self):
        """Course-relative URL that jumps to the next required step's page."""
        template = '/jump_to_id/{}'
        return template.format(self.next_step)

    @property
    def hide_feedback(self):
        """Whether feedback is withheld: option enabled and attempts still remain."""
        option_enabled = self.get_option("pb_hide_feedback_if_attempts_remain")
        if not option_enabled:
            # Mirror ``and`` semantics: return the falsy option value itself.
            return option_enabled
        return not self.max_attempts_reached

    def get_message(self, completed):
        """
        Return the feedback message shown to a student after a submission.

        The message depends on whether the submission achieved a perfect
        score and whether any attempts remain.
        """
        if completed:
            message_key = 'completed'              # perfect score achieved
        elif self.max_attempts_reached:
            message_key = 'max_attempts_reached'   # imperfect, no attempts left
        else:
            message_key = 'incomplete'             # imperfect, can retry
        return self.get_message_content(message_key)

    @property
    def review_tips(self):
        """Tips to show on the review step; this block type provides none."""
        return []

    def show_extended_feedback(self):
        """Whether detailed feedback should be shown: enabled and no attempts left."""
        if not self.extended_feedback:
            # Mirror ``and`` semantics: return the falsy value itself.
            return self.extended_feedback
        return self.max_attempts_reached

    @XBlock.json_handler
    def get_results(self, queries, suffix=''):
        """
        AJAX handler returning detailed per-question results for extended feedback.

        Results can reach the client two ways: embedded in the template when
        the mentoring block first renders, or via an AJAX request such as this
        handler (or ``submit``).
        """
        results, completed, show_message = self._get_standard_results()

        response = {
            'results': results,
            'completed': completed,
            'step': self.step,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
        }
        if show_message:
            response['message'] = self.get_message(completed)
        return response

    def _get_standard_results(self):
        """
        Gets previous submissions results as if submit was called with exactly the same values as last time.
        """
        results = []
        completed = True
        show_message = (not self.hide_feedback) and bool(self.student_results)

        # All children are visible simultaneously, so need to collect results for all of them
        for child in self.steps:
            child_result = child.get_last_result()
            results.append([child.name, child_result])
            completed = completed and (child_result.get('status', None) == 'correct')

        return results, completed, show_message

    @XBlock.json_handler
    def submit(self, submissions, suffix=''):
        """
        AJAX handler: grade the student's answers to every child question.

        Arguments:
            submissions (dict): maps each child question's name to the value
                the student submitted for it.
            suffix (str): Not used.

        Returns:
            dict with keys 'results', 'completed', 'message', 'max_attempts'
            and 'num_attempts'.

        Raises:
            JsonHandlerError: 403 when no attempts remain, or when a required
                previous step has not been completed yet.
        """
        log.info(u'Received submissions: {}'.format(submissions))
        # server-side check that the user is allowed to submit:
        if self.max_attempts_reached:
            raise JsonHandlerError(403, "Maximum number of attempts already reached.")
        elif self.has_missing_dependency:
            raise JsonHandlerError(
                403,
                "You need to complete all previous steps before being able to complete the current one."
            )

        # This has now been attempted:
        self.attempted = True

        # Grade each child question that appears in the submission payload;
        # the whole block only counts as completed if every child is correct.
        submit_results = []
        previously_completed = self.completed
        completed = True
        for child in self.steps:
            if child.name and child.name in submissions:
                submission = submissions[child.name]
                child_result = child.submit(submission)
                submit_results.append([child.name, child_result])
                child.save()
                completed = completed and (child_result['status'] == 'correct')

        # Unlock the following step once this one has been answered perfectly.
        if completed and self.next_step == self.url_name:
            self.next_step = self.followed_by

        # Update the score and attempts, unless the user had already achieved a perfect score ("completed"):
        if not previously_completed:
            # Update the results
            while self.student_results:
                self.student_results.pop()
            for result in submit_results:
                self.student_results.append(result)

            # Save the user's latest score
            self.runtime.publish(self, 'grade', {
                'value': self.score.raw,
                'max_value': self.max_score(),
            })

            # Mark this as having used an attempt:
            if self.max_attempts > 0:
                self.num_attempts += 1

        # Save the completion status.
        # Once it has been completed once, keep completion even if user changes values
        self.completed = bool(completed) or previously_completed

        # NOTE: the message reflects this submission's correctness (``completed``),
        # not the sticky ``self.completed`` flag set above.
        message = self.get_message(completed)
        raw_score = self.score.raw

        self.runtime.publish(self, 'xblock.problem_builder.submitted', {
            'num_attempts': self.num_attempts,
            'submitted_answer': submissions,
            'grade': raw_score,
        })

        return {
            'results': submit_results,
            'completed': self.completed,
            'message': message,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
        }

    def feedback_dispatch(self, target_data, stringify):
        """
        Return ``target_data`` (JSON-encoded when ``stringify`` is true) if
        extended feedback is currently shown; otherwise return None.
        """
        if not self.show_extended_feedback():
            return None
        return json.dumps(target_data) if stringify else target_data

    def correct_json(self, stringify=True):
        """Return the correctly-answered results (JSON string when ``stringify``)."""
        correct_results = self.score.correct
        return self.feedback_dispatch(correct_results, stringify)

    def incorrect_json(self, stringify=True):
        """Return the incorrectly-answered results (JSON string when ``stringify``)."""
        incorrect_results = self.score.incorrect
        return self.feedback_dispatch(incorrect_results, stringify)

    def partial_json(self, stringify=True):
        """Return the partially-correct results (JSON string when ``stringify``)."""
        partial_results = self.score.partially_correct
        return self.feedback_dispatch(partial_results, stringify)

    @XBlock.json_handler
    def try_again(self, data, suffix=''):
        """
        AJAX handler: reset the block so the student can attempt it again.

        Returns an error payload when the maximum number of attempts has
        already been used.
        """
        if self.max_attempts_reached:
            return {
                'result': 'error',
                'message': 'max attempts reached'
            }

        # Reset progress and discard the stored per-question results.
        self.step = 0
        self.completed = False
        while self.student_results:
            self.student_results.pop()

        return {
            'result': 'success'
        }

    def validate(self):
        """
        Validate this block's structure (not individual field values):
        flag duplicate message components and surface child-level errors.
        """
        validation = super(MentoringBlock, self).validate()
        any_child_invalid = False
        seen_message_types = set()
        for child_id in self.children:
            child = self.runtime.get_block(child_id)
            # Check if the child has any errors of its own:
            if not child.validate().empty:
                any_child_invalid = True
            # At most one "message" block of each type is allowed:
            if isinstance(child, MentoringMessageBlock):
                msg_type = child.type
                if msg_type in seen_message_types:
                    validation.add(ValidationMessage(
                        ValidationMessage.ERROR,
                        self._(u"There should only be one '{msg_type}' message component.").format(msg_type=msg_type)
                    ))
                seen_message_types.add(msg_type)
        if any_child_invalid:
            validation.add(ValidationMessage(
                ValidationMessage.ERROR,
                self._(u"A component inside this mentoring block has issues.")
            ))
        return validation

    def author_edit_view(self, context):
        """
        Studio author view: the standard container editing UI plus this
        block's url_name and the problem-builder editing assets.
        """
        ctx = dict(context)
        ctx['author_edit_view'] = True
        frag = super(MentoringBlock, self).author_edit_view(ctx)
        frag.add_content(loader.render_template('templates/html/mentoring_url_name.html', {
            'url_name': self.url_name
        }))
        # CSS first, then JS, preserving the original load order.
        for css_file in ('public/css/problem-builder.css',
                         'public/css/problem-builder-edit.css',
                         'public/css/problem-builder-tinymce-content.css'):
            frag.add_css_url(self.runtime.local_resource_url(self, css_file))
        for js_file in ('public/js/util.js', 'public/js/container_edit.js'):
            frag.add_javascript_url(self.runtime.local_resource_url(self, js_file))
        frag.initialize_js('ProblemBuilderContainerEdit')
        return frag

    @staticmethod
    def workbench_scenarios():
        """Workbench scenarios, loaded from the external (private) template directory."""
        scenarios_path = 'templates/xml'
        return loader.load_scenarios_from_path(scenarios_path)

    def student_view_data(self, context=None):
        """
        Return a JSON-serializable summary of this block's student view,
        retrievable from the Course Block API.
        """
        child_blocks = (self.runtime.get_block(child_id) for child_id in self.children)
        components = [
            block.student_view_data()
            for block in child_blocks
            if hasattr(block, 'student_view_data')
        ]

        message_types = ('completed', 'incomplete', 'max_attempts_reached')
        return {
            'block_id': unicode(self.scope_ids.usage_id),
            'display_name': self.display_name,
            'max_attempts': self.max_attempts,
            'extended_feedback': self.extended_feedback,
            'feedback_label': self.feedback_label,
            'components': components,
            'messages': {
                message_type: self.get_message_content(message_type)
                for message_type in message_types
            }
        }
Пример #24
0
 class TestBlock(XBlock):
     """
     Block for testing
     """
     field_x = List()
Пример #25
0
class StudioMixin(object):
    """
    Studio editing view for OpenAssessment XBlock.
    """

    # Fallback rubric skeleton: every rubric needs at least one criterion with
    # at least one option, so the editor is seeded with an empty one.
    DEFAULT_CRITERIA = [
        {
            'label': '',
            'options': [
                {
                    'label': ''
                },
            ]
        }
    ]

    # Since the XBlock problem definition contains only assessment
    # modules that are enabled, we need to keep track of the order
    # that the user left assessments in the editor, including
    # the ones that were disabled.  This allows us to keep the order
    # that the user specified.
    editor_assessments_order = List(
        default=DEFAULT_EDITOR_ASSESSMENTS_ORDER,
        scope=Scope.content,
        help="The order to display assessments in the editor."
    )

    def studio_view(self, context=None):
        """
        Render the OpenAssessment XBlock for editing in Studio.

        Args:
            context: Not actively used for this view.

        Returns:
            (Fragment): An HTML fragment for editing the configuration of this XBlock.
        """
        rendered_template = get_template(
            'openassessmentblock/edit/oa_edit.html'
        ).render(Context(self.editor_context()))
        frag = Fragment(rendered_template)
        frag.add_javascript(pkg_resources.resource_string(__name__, "static/js/openassessment-studio.min.js"))
        frag.initialize_js('OpenAssessmentEditor')
        return frag

    def editor_context(self):
        """
        Build the template context used to render the Studio editing view.

        Returns:
            dict with keys
                'prompt' (unicode), 'title' (unicode), 'criteria' (list),
                'submission_start' (unicode), 'submission_due' (unicode),
                'assessments' (dict), 'feedbackprompt' (unicode),
                'feedback_default_text' (unicode), 'allow_file_upload' (bool),
                'allow_latex' (bool), 'leaderboard_show' (int),
                'editor_assessments_order' (list)

        """
        # In the authoring GUI, date and time fields should never be null.
        # Therefore, we need to resolve all "default" dates to datetime objects
        # before displaying them in the editor.
        __, __, date_ranges = resolve_dates(
            self.start, self.due,
            [(self.submission_start, self.submission_due)] +
            [(asmnt.get('start'), asmnt.get('due')) for asmnt in self.valid_assessments],
            self._
        )

        submission_start, submission_due = date_ranges[0]
        assessments = self._assessments_editor_context(date_ranges[1:])
        editor_assessments_order = self._editor_assessments_order_context()

        # Every rubric requires one criterion. If there is no criteria
        # configured for the XBlock, return one empty default criterion, with
        # an empty default option.
        criteria = copy.deepcopy(self.rubric_criteria_with_labels)
        if not criteria:
            criteria = self.DEFAULT_CRITERIA

        # To maintain backwards compatibility, if there is no
        # feedback_default_text configured for the xblock, use the default text
        feedback_default_text = copy.deepcopy(self.rubric_feedback_default_text)
        if not feedback_default_text:
            feedback_default_text = DEFAULT_RUBRIC_FEEDBACK_TEXT

        return {
            'prompt': self.prompt,
            'title': self.title,
            'submission_due': submission_due,
            'submission_start': submission_start,
            'assessments': assessments,
            'criteria': criteria,
            'feedbackprompt': self.rubric_feedback_prompt,
            'feedback_default_text': feedback_default_text,
            'allow_file_upload': self.allow_file_upload,
            'allow_latex': self.allow_latex,
            'leaderboard_show': self.leaderboard_show,
            # Django templates can't handle dashes in names, so the assessment
            # names are converted to underscore form for the template keys.
            'editor_assessments_order': [
                make_django_template_key(asmnt)
                for asmnt in editor_assessments_order
            ],
        }

    @XBlock.json_handler
    def update_editor_context(self, data, suffix=''):
        """
        Update the XBlock's configuration.

        Args:
            data (dict): Data from the request; should have the format described
            in the editor schema.

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool) and 'msg' (str)
        """

        # Validate and sanitize the data using a schema
        # If the data is invalid, this means something is wrong with
        # our JavaScript, so we log an exception.
        try:
            data = EDITOR_UPDATE_SCHEMA(data)
        except MultipleInvalid:
            logger.exception('Editor context is invalid')
            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Check that the editor assessment order contains all the assessments.  We are more flexible on example-based.
        if set(DEFAULT_EDITOR_ASSESSMENTS_ORDER) != (set(data['editor_assessments_order']) - {'example-based-assessment'}):
            logger.exception('editor_assessments_order does not contain all expected assessment types')
            return {'success': False, 'msg': self._('Error updating XBlock configuration')}

        # Backwards compatibility: We used to treat "name" as both a user-facing label
        # and a unique identifier for criteria and options.
        # Now we treat "name" as a unique identifier, and we've added an additional "label"
        # field that we display to the user.
        # If the JavaScript editor sends us a criterion or option without a "name"
        # field, we should assign it a unique identifier.
        for criterion in data['criteria']:
            if 'name' not in criterion:
                criterion['name'] = uuid4().hex
            for option in criterion['options']:
                if 'name' not in option:
                    option['name'] = uuid4().hex

        # If example based assessment is enabled, we replace its xml definition with the dictionary
        # definition we expect for validation and storing.
        for assessment in data['assessments']:
            if assessment['name'] == 'example-based-assessment':
                try:
                    assessment['examples'] = parse_examples_from_xml_str(assessment['examples_xml'])
                except UpdateFromXmlError:
                    return {'success': False, 'msg': self._(
                        u'Validation error: There was an error in the XML definition of the '
                        u'examples provided by the user. Please correct the XML definition before saving.')
                    }
                except KeyError:
                    return {'success': False, 'msg': self._(
                        u'Validation error: No examples were provided for example based assessment.'
                    )}
                # This is where we default to EASE for problems which are edited in the GUI
                assessment['algorithm_id'] = 'ease'

        # Run the full cross-field validation before mutating any state.
        xblock_validator = validator(self, self._)
        success, msg = xblock_validator(
            create_rubric_dict(data['prompt'], data['criteria']),
            data['assessments'],
            submission_start=data['submission_start'],
            submission_due=data['submission_due'],
            leaderboard_show=data['leaderboard_show']
        )
        if not success:
            return {'success': False, 'msg': self._('Validation error: {error}').format(error=msg)}

        # At this point, all the input data has been validated,
        # so we can safely modify the XBlock fields.
        self.title = data['title']
        self.display_name = data['title']
        self.prompt = data['prompt']
        self.rubric_criteria = data['criteria']
        self.rubric_assessments = data['assessments']
        self.editor_assessments_order = data['editor_assessments_order']
        self.rubric_feedback_prompt = data['feedback_prompt']
        self.rubric_feedback_default_text = data['feedback_default_text']
        self.submission_start = data['submission_start']
        self.submission_due = data['submission_due']
        self.allow_file_upload = bool(data['allow_file_upload'])
        self.allow_latex = bool(data['allow_latex'])
        self.leaderboard_show = data['leaderboard_show']

        return {'success': True, 'msg': self._(u'Successfully updated OpenAssessment XBlock')}

    @XBlock.json_handler
    def check_released(self, data, suffix=''):
        """
        Check whether the problem has been released.

        Args:
            data (dict): Not used

        Keyword Arguments:
            suffix (str): Not used

        Returns:
            dict with keys 'success' (bool), 'message' (unicode), and 'is_released' (bool)
        """
        # There aren't currently any server-side error conditions we report to the client,
        # but we send success/msg values anyway for consistency with other handlers.
        return {
            'success': True, 'msg': u'',
            'is_released': self.is_released()
        }

    def _assessments_editor_context(self, assessment_dates):
        """
        Transform the rubric assessments list into the context
        we will pass to the Django template.

        Args:
            assessment_dates: List of assessment date ranges (tuples of start/end datetimes).

        Returns:
            dict
        """
        assessments = {}
        for asmnt, date_range in zip(self.rubric_assessments, assessment_dates):
            # Django Templates cannot handle dict keys with dashes, so we'll convert
            # the dashes to underscores.
            template_name = make_django_template_key(asmnt['name'])
            assessments[template_name] = copy.deepcopy(asmnt)
            assessments[template_name]['start'] = date_range[0]
            assessments[template_name]['due'] = date_range[1]

        # In addition to the data in the student training assessment, we need to include two additional
        # pieces of information: a blank context to render the empty template with, and the criteria
        # for each example (so we don't have any complicated logic within the template). Though this
        # could be accomplished within the template, we are opting to remove logic from the template.
        student_training_module = self.get_assessment_module('student-training')

        # Blank template used both for the "add example" UI and as the fallback
        # example when student training is disabled.
        student_training_template = {'answer': ""}
        criteria_list = copy.deepcopy(self.rubric_criteria_with_labels)
        for criterion in criteria_list:
            criterion['option_selected'] = ""
        student_training_template['criteria'] = criteria_list

        if student_training_module:
            example_list = []
            # Adds each example to a modified version of the student training module dictionary.
            for example in student_training_module['examples']:
                criteria_list = copy.deepcopy(self.rubric_criteria_with_labels)
                # Equivalent to a Join Query, this adds the selected option to the Criterion's dictionary, so that
                # it can be easily referenced in the template without searching through the selected options.
                for criterion in criteria_list:
                    for option_selected in example['options_selected']:
                        if option_selected['criterion'] == criterion['name']:
                            criterion['option_selected'] = option_selected['option']
                example_list.append({
                    'answer': example['answer'],
                    'criteria': criteria_list,
                })
            assessments['training'] = {'examples': example_list, 'template': student_training_template}
        # If we don't have student training enabled, we still need to render a single (empty, or default) example
        else:
            assessments['training'] = {'examples': [student_training_template], 'template': student_training_template}

        example_based_assessment = self.get_assessment_module('example-based-assessment')

        if example_based_assessment:
            assessments['example_based_assessment'] = {
                'examples': serialize_examples_to_xml_str(example_based_assessment)
            }

        return assessments

    def _editor_assessments_order_context(self):
        """
        Create a list of assessment names in the order
        the user last set in the editor, including
        assessments that are not currently enabled.

        Returns:
            list of assessment names
        """
        order = copy.deepcopy(self.editor_assessments_order)
        used_assessments = [asmnt['name'] for asmnt in self.valid_assessments]
        default_editor_order = copy.deepcopy(DEFAULT_EDITOR_ASSESSMENTS_ORDER)

        # Backwards compatibility:
        # If the problem already contains example-based assessment
        # then allow the editor to display example-based assessments.
        if 'example-based-assessment' in used_assessments:
            default_editor_order.insert(0, 'example-based-assessment')

        # Backwards compatibility:
        # If the editor assessments order doesn't match the problem order,
        # fall back to the problem order.
        # This handles the migration of problems created pre-authoring,
        # which will have the default editor order.
        problem_order_indices = [
            order.index(asmnt_name) for asmnt_name in used_assessments
            if asmnt_name in order
        ]
        if problem_order_indices != sorted(problem_order_indices):
            unused_assessments = list(set(default_editor_order) - set(used_assessments))
            return sorted(unused_assessments) + used_assessments

        # Forwards compatibility:
        # Include any additional assessments that may have been added since the problem was created.
        else:
            return order + list(set(default_editor_order) - set(order))
Пример #26
0
 class TestBlock(XBlock):
     """
     Block for testing
     """
     field_x = List(display_name="Field Known as X")
class OpenAssessmentBlock(MessageMixin,
                          SubmissionMixin,
                          PeerAssessmentMixin,
                          SelfAssessmentMixin,
                          StaffAssessmentMixin,
                          StudioMixin,
                          GradeMixin,
                          LeaderboardMixin,
                          StaffAreaMixin,
                          WorkflowMixin,
                          TeamWorkflowMixin,
                          StudentTrainingMixin,
                          LmsCompatibilityMixin,
                          CourseItemsListingMixin,
                          ConfigMixin,
                          TeamMixin,
                          XBlock):
    """Displays a prompt and provides an area where students can compose a response."""

    # Assessment step types that may appear in ``rubric_assessments``.
    VALID_ASSESSMENT_TYPES = [
        "student-training",
        "peer-assessment",
        "self-assessment",
        "staff-assessment",
    ]

    # Team-based assignments only support staff grading.
    VALID_ASSESSMENT_TYPES_FOR_TEAMS = [  # pylint: disable=invalid-name
        'staff-assessment',
    ]

    # Directory that static assets are served from.
    public_dir = 'static'

    # --- Problem definition / scheduling (author-controlled) fields ---

    submission_start = String(
        default=DEFAULT_START, scope=Scope.settings,
        help="ISO-8601 formatted string representing the submission start date."
    )

    submission_due = String(
        default=DEFAULT_DUE, scope=Scope.settings,
        help="ISO-8601 formatted string representing the submission due date."
    )

    # Raw storage for text_response; read through the ``text_response``
    # property, which supplies backward-compatible defaults.
    text_response_raw = String(
        help="Specify whether learners must include a text based response to this problem's prompt.",
        default="required",
        scope=Scope.settings
    )

    # Raw storage for file_upload_response; read through the
    # ``file_upload_response`` property.
    file_upload_response_raw = String(
        help="Specify whether learners are able to upload files as a part of their response.",
        default=None,
        scope=Scope.settings
    )

    allow_file_upload = Boolean(
        default=False,
        scope=Scope.content,
        help="Do not use. For backwards compatibility only."
    )

    # Raw storage for file_upload_type; read through the ``file_upload_type``
    # property, which falls back to ``allow_file_upload`` for old blocks.
    file_upload_type_raw = String(
        default=None,
        scope=Scope.content,
        help="File upload to be included with submission (can be 'image', 'pdf-and-image', or 'custom')."
    )

    white_listed_file_types = List(
        default=[],
        scope=Scope.content,
        help="Custom list of file types allowed with submission."
    )

    allow_latex = Boolean(
        default=False,
        scope=Scope.settings,
        help="Latex rendering allowed with submission."
    )

    title = String(
        default="Open Response Assessment",
        scope=Scope.content,
        help="A title to display to a student (plain text)."
    )

    leaderboard_show = Integer(
        default=0,
        scope=Scope.content,
        help="The number of leaderboard results to display (0 if none)"
    )

    prompt = String(
        default=DEFAULT_PROMPT,
        scope=Scope.content,
        help="The prompts to display to a student."
    )

    prompts_type = String(
        default='text',
        scope=Scope.content,
        help="The type of prompt. html or text"
    )

    rubric_criteria = List(
        default=DEFAULT_RUBRIC_CRITERIA,
        scope=Scope.content,
        help="The different parts of grading for students giving feedback."
    )

    rubric_feedback_prompt = String(
        default=DEFAULT_RUBRIC_FEEDBACK_PROMPT,
        scope=Scope.content,
        help="The rubric feedback prompt displayed to the student"
    )

    rubric_feedback_default_text = String(
        default=DEFAULT_RUBRIC_FEEDBACK_TEXT,
        scope=Scope.content,
        help="The default rubric feedback text displayed to the student"
    )

    rubric_assessments = List(
        default=DEFAULT_ASSESSMENT_MODULES,
        scope=Scope.content,
        help="The requested set of assessments and the order in which to apply them."
    )

    # --- Per-student state fields ---

    submission_uuid = String(
        default=None,
        scope=Scope.user_state,
        help="The student's submission that others will be assessing."
    )

    has_saved = Boolean(
        default=False,
        scope=Scope.user_state,
        help="Indicates whether the user has saved a response."
    )

    saved_response = String(
        default=u"",
        scope=Scope.user_state,
        help="Saved response submission for the current user."
    )

    saved_files_descriptions = String(
        default=u"",
        scope=Scope.user_state,
        help="Saved descriptions for each uploaded file."
    )

    saved_files_names = String(
        default=u"",
        scope=Scope.user_state,
        help="Saved original names for each uploaded file."
    )

    saved_files_sizes = String(
        default=u"",
        scope=Scope.user_state,
        help="Filesize of each uploaded file in bytes."
    )

    no_peers = Boolean(
        default=False,
        scope=Scope.user_state,
        help="Indicates whether or not there are peers to grade."
    )

    # --- Team workflow settings ---

    teams_enabled = Boolean(
        default=False,
        scope=Scope.settings,
        help="Whether team submissions are enabled for this case study.",
    )

    selected_teamset_id = String(
        default=u"",
        scope=Scope.settings,
        help="The id of the selected teamset.",
    )

    @property
    def course_id(self):
        """The current course id, as a string, taken from the LMS runtime."""
        runtime = self.xmodule_runtime  # pylint: disable=no-member
        return str(runtime.course_id)

    @property
    def text_response(self):
        """
        The text-response requirement. Blocks created before the
        text_response / file_upload_response fields existed are treated
        as requiring a text response.
        """
        legacy_block = not self.file_upload_response_raw and not self.text_response_raw
        return 'required' if legacy_block else self.text_response_raw

    @text_response.setter
    def text_response(self, value):
        """Store ``value`` in text_response_raw, normalizing falsy values to None."""
        self.text_response_raw = value or None

    @property
    def file_upload_response(self):
        """
        Effective file-upload requirement, with backward compatibility.

        Legacy blocks (created before file_upload_response_raw existed) that
        configured uploads via file_upload_type_raw or allow_file_upload are
        treated as allowing optional uploads.
        """
        if self.file_upload_response_raw:
            return self.file_upload_response_raw
        if self.file_upload_type_raw is not None or self.allow_file_upload:
            return 'optional'
        return self.file_upload_response_raw

    @file_upload_response.setter
    def file_upload_response(self, value):
        """Store *value* in file_upload_response_raw, normalizing falsy values to None."""
        self.file_upload_response_raw = value or None

    @property
    def file_upload_type(self):
        """
        Effective upload type, bridging the legacy allow_file_upload flag.

        Prefers the newer file_upload_type_raw field; legacy blocks that only
        set allow_file_upload are treated as image-only uploads.
        """
        raw_type = self.file_upload_type_raw
        if raw_type is not None:
            return raw_type
        return 'image' if self.allow_file_upload else None

    @file_upload_type.setter
    def file_upload_type(self, value):
        """
        Setter for file_upload_type_raw
        """
        # Stored as-is: unlike the other *_raw setters, falsy values are NOT
        # normalized to None here.
        self.file_upload_type_raw = value

    @property
    def white_listed_file_types_string(self):
        """Return the allowed file types as a comma-delimited string ('' if none)."""
        allowed = self.white_listed_file_types
        return ','.join(allowed) if allowed else ''

    @white_listed_file_types_string.setter
    def white_listed_file_types_string(self, value):
        """
        Parse a comma-delimited white-list string into a cleaned list of types.

        Each entry is stripped of whitespace and leading/trailing dots and
        lower-cased. Empty segments (e.g. from "pdf,,doc" or a trailing comma)
        are discarded instead of being stored as empty-string file types.
        A falsy *value* clears the list (stores None).
        """
        if not value:
            self.white_listed_file_types = None
            return
        cleaned = (file_type.strip().strip('.').lower() for file_type in value.split(','))
        # Drop empty segments so malformed input can't yield '' entries.
        self.white_listed_file_types = [file_type for file_type in cleaned if file_type]

    def get_anonymous_user_id(self, username, course_id):
        """
        Resolve a username to its anonymous user id via the 'user' service.

        Args:
            username (str): user's name entered by staff to get info.
            course_id (str): course id.

        Returns:
            A unique id for the (user, course) pair.
        """
        user_service = self.runtime.service(self, 'user')
        return user_service.get_anonymous_user_id(username, course_id)

    def is_user_state_service_available(self):
        """Return True if the runtime exposes the 'user_state' service."""
        try:
            self.runtime.service(self, 'user_state')
        except NoSuchServiceError:
            return False
        return True

    def get_user_state(self, username):
        """
        Fetch the stored student-module state for *username* on this block.

        Arguments:
            username (str): the learner whose state should be looked up.

        Returns:
            dict: the saved state, or {} when the service is unavailable.
        """
        if not self.is_user_state_service_available():
            return {}
        user_state_service = self.runtime.service(self, 'user_state')
        return user_state_service.get_state_as_dict(username, str(self.location))  # pylint: disable=no-member

    def should_use_user_state(self, upload_urls):
        """
        Decide whether user state should back-fill missing file-upload data.

        All of the following must hold:
        1. no upload URL info came with the submission,
        2. the user state service is available (only in courseware),
        3. the waffle flag/switch is enabled,
        4. file upload is required or optional for this block.
        """
        # NOTE(review): the last operand is a string ('required'/'optional'),
        # so a truthy result is not strictly a bool — callers use it in a
        # boolean context.
        return (
            not any(upload_urls)
            and self.is_user_state_service_available()
            and self.user_state_upload_data_enabled
            and self.file_upload_response
        )

    def should_get_all_files_urls(self, upload_urls):
        """
        Decide whether every file the learner submitted should be fetched.

        Truthy when all of the following hold:
        1. the submission carried no upload URL info,
        2. the fetch-all-urls waffle flag/switch is enabled,
        3. file upload is required or optional for this block.

        Arguments:
            upload_urls (list): (file url, description, name) tuples, if info
                is present, else an empty list.
        """
        return (
            not any(upload_urls)
            and self.is_fetch_all_urls_waffle_enabled
            and self.file_upload_response
        )

    def get_student_item_dict_from_username_or_email(self, username_or_email):
        """Build the student item dict for *username_or_email* in this block's course."""
        anonymous_id = self.get_anonymous_user_id(username_or_email, self.course_id)
        return self.get_student_item_dict(anonymous_user_id=anonymous_id)

    def get_anonymous_user_id_from_xmodule_runtime(self):
        """Return the runtime's anonymous student id, or None outside the LMS."""
        if not hasattr(self, "xmodule_runtime"):
            return None
        return self.xmodule_runtime.anonymous_student_id  # pylint:disable=E1101

    def get_student_item_dict(self, anonymous_user_id=None):
        """Build the student item dict identifying this learner/item/course.

        See also: submissions.api for details.

        Args:
            anonymous_user_id (str): Optional anonymous id for the
                (user, course) pair; when omitted, the runtime's anonymous
                student id is used.

        Returns:
            (dict): student id, item id, course id and item type for this
                XBlock instance.
        """
        item_id = str(self.scope_ids.usage_id)

        if hasattr(self, "xmodule_runtime"):
            course_id = self.course_id
            # Prefer an explicitly supplied anonymous id over the runtime's.
            student_id = anonymous_user_id or self.xmodule_runtime.anonymous_student_id  # pylint:disable=E1101
        else:
            # Workbench/test runtimes have no real course id; this placeholder
            # is a temporary expediency for LMS integration.
            course_id = "edX/Enchantment_101/April_1"
            user_id = self.scope_ids.user_id
            student_id = None if user_id is None else str(user_id)

        return {
            'student_id': student_id,
            'item_id': item_id,
            'course_id': course_id,
            'item_type': 'openassessment',
        }

    def add_javascript_files(self, fragment, item):
        """
        Add JavaScript URLs to *fragment* for *item* (a file or a directory).

        Directories are walked one level deep; nested directories are skipped.
        """
        if not pkg_resources.resource_isdir(__name__, item):
            fragment.add_javascript_url(self.runtime.local_resource_url(self, item))
            return
        for child_item in pkg_resources.resource_listdir(__name__, item):
            path = os.path.join(item, child_item)
            # Only add plain files; do not recurse into sub-directories.
            if not pkg_resources.resource_isdir(__name__, path):
                fragment.add_javascript_url(self.runtime.local_resource_url(self, path))

    def student_view(self, context=None):  # pylint: disable=unused-argument
        """The main view of OpenAssessmentBlock, displayed when viewing courses.

        The main view which displays the general layout for Open Ended
        Assessment Questions. The contents of the XBlock are determined
        dynamically based on the assessment workflow configured by the author.

        Args:
            context: Not used for this view.

        Returns:
            (Fragment): The HTML Fragment for this XBlock, which determines the
            general frame of the Open Ended Assessment Question.
        """
        # On page load, update the workflow status.
        # We need to do this here because peers may have graded us, in which
        # case we may have a score available.

        try:
            self.update_workflow_status()
        except AssessmentWorkflowError:
            # Log the exception, but continue loading the page
            logger.exception('An error occurred while updating the workflow on page load.')

        ui_models = self._create_ui_models()
        # All data we intend to pass to the front end.
        context_dict = {
            "title": self.title,
            "prompts": self.prompts,
            "prompts_type": self.prompts_type,
            "rubric_assessments": ui_models,
            "show_staff_area": self.is_course_staff and not self.in_studio_preview,
        }
        template = get_template("openassessmentblock/oa_base.html")

        # Team assignments hide all assessment sections from users who do not
        # have valid access to the team assessment.
        if self.teams_enabled and not self.valid_access_to_team_assessment():
            context_dict['rubric_assessments'] = []

        return self._create_fragment(template, context_dict, initialize_js_func='OpenAssessmentBlock')

    def ora_blocks_listing_view(self, context=None):
        """Render the Instructor Dashboard listing of all course ORA blocks.

        Used in the Open Response Assessment tab in the LMS Instructor
        Dashboard.

        Args:
            context: may contain:
                "ora_items" - all course items with names and parents, e.g.:
                    [{"parent_name": "Vertical name",
                      "name": "ORA Display Name",
                      "url_grade_available_responses": "/grade_available_responses_view",
                      "staff_assessment": false,
                      "parent_id": "vertical_block_id",
                      "url_base": "/student_view",
                      "id": "openassessment_block_id"
                     }, ...]
                "ora_item_view_enabled" - whether the LMS API endpoint serving
                    the XBlock view is enabled.

        Returns:
            (Fragment): The HTML Fragment for this XBlock.
        """
        context = context or {}
        context_dict = {
            "ora_items": json.dumps(context.get('ora_items', [])),
            "ora_item_view_enabled": context.get('ora_item_view_enabled', False),
        }

        template = get_template('openassessmentblock/instructor_dashboard/oa_listing.html')

        # Asset variant suffix; mirrors how _create_fragment loads resources.
        min_postfix = '.min' if settings.DEBUG else ''

        return self._create_fragment(
            template,
            context_dict,
            initialize_js_func='CourseOpenResponsesListingBlock',
            additional_css=["static/css/lib/backgrid/backgrid%s.css" % min_postfix],
            additional_js=["static/js/lib/backgrid/backgrid%s.js" % min_postfix]
        )

    def grade_available_responses_view(self, context=None):  # pylint: disable=unused-argument
        """Render the staff grading area.

        Auxiliary view used in the Open Response Assessment tab of the LMS
        Instructor Dashboard.

        Args:
            context: Not used for this view.

        Returns:
            (Fragment): The HTML Fragment for this XBlock.
        """
        student_item = self.get_student_item_dict()
        staff_assessment_required = "staff-assessment" in self.assessment_steps

        context_dict = {
            "title": self.title,
            'staff_assessment_required': staff_assessment_required
        }
        if staff_assessment_required:
            # Pull in staff grading statistics for this item.
            stats = self.get_staff_assessment_statistics_context(
                student_item["course_id"], student_item["item_id"]
            )
            context_dict.update(stats)

        template = get_template('openassessmentblock/instructor_dashboard/oa_grade_available_responses.html')
        return self._create_fragment(template, context_dict, initialize_js_func='StaffAssessmentBlock')

    def _create_fragment(self, template, context_dict, initialize_js_func, additional_css=None, additional_js=None):
        """
        Creates a fragment for display.

        Renders *template* with *context_dict*, attaches this block's CSS/JS
        assets (as individual URLs in DEBUG, as pre-built bundles otherwise),
        and initializes the named JavaScript entry point.
        """
        fragment = Fragment(template.render(context_dict))

        if additional_css is None:
            additional_css = []
        if additional_js is None:
            additional_js = []

        # Pick the RTL or LTR stylesheet based on the current language.
        i18n_service = self.runtime.service(self, 'i18n')
        if hasattr(i18n_service, 'get_language_bidi') and i18n_service.get_language_bidi():
            css_url = "static/css/openassessment-rtl.css"
        else:
            css_url = "static/css/openassessment-ltr.css"

        if settings.DEBUG:
            # DEBUG: add each asset as a separate URL for easier debugging.
            for css in additional_css:
                fragment.add_css_url(self.runtime.local_resource_url(self, css))
            fragment.add_css_url(self.runtime.local_resource_url(self, css_url))

            for js in additional_js:  # pylint: disable=invalid-name
                self.add_javascript_files(fragment, js)
            self.add_javascript_files(fragment, "static/js/src/oa_shared.js")
            self.add_javascript_files(fragment, "static/js/src/oa_server.js")
            self.add_javascript_files(fragment, "static/js/src/lms")
        else:
            # TODO: load CSS and JavaScript as URLs once they can be served by the CDN
            for css in additional_css:
                fragment.add_css(load(css))
            fragment.add_css(load(css_url))

            # minified additional_js should be already included in 'make javascript'
            fragment.add_javascript(load("static/js/openassessment-lms.min.js"))
        # Constants the client-side code needs at initialization time.
        js_context_dict = {
            "ALLOWED_IMAGE_MIME_TYPES": self.ALLOWED_IMAGE_MIME_TYPES,
            "ALLOWED_FILE_MIME_TYPES": self.ALLOWED_FILE_MIME_TYPES,
            "FILE_EXT_BLACK_LIST": self.FILE_EXT_BLACK_LIST,
            "FILE_TYPE_WHITE_LIST": self.white_listed_file_types,
            "MAXIMUM_FILE_UPLOAD_COUNT": self.MAX_FILES_COUNT,
            "TEAM_ASSIGNMENT": self.is_team_assignment()
        }
        fragment.initialize_js(initialize_js_func, js_context_dict)
        return fragment

    @property
    def is_admin(self):
        """
        Whether the current user has global staff permissions.

        Returns:
            bool
        """
        runtime = getattr(self, 'xmodule_runtime', None)
        # getattr on None safely yields the False default outside the LMS.
        return getattr(runtime, 'user_is_admin', False)

    @property
    def is_course_staff(self):
        """
        Whether the current user has course staff permissions for this XBlock.

        Returns:
            bool
        """
        runtime = getattr(self, 'xmodule_runtime', None)
        # getattr on None safely yields the False default outside the LMS.
        return getattr(runtime, 'user_is_staff', False)

    @property
    def is_beta_tester(self):
        """
        Whether the current user is a beta tester.

        Returns:
            bool
        """
        runtime = getattr(self, 'xmodule_runtime', None)
        # getattr on None safely yields the False default outside the LMS.
        return getattr(runtime, 'user_is_beta_tester', False)

    @property
    def in_studio_preview(self):
        """
        Check whether we are in Studio preview mode.

        Returns:
            bool

        """
        # When we're running in Studio Preview mode, the XBlock won't provide us with a user ID.
        # (Note that `self.xmodule_runtime` will still provide an anonymous
        # student ID, so we can't rely on that)
        return self.scope_ids.user_id is None

    def _create_ui_models(self):
        """Combine UI attributes and XBlock configuration into UI models.

        Starts with the submission step, appends a UI model (static UI
        attributes merged over the configured assessment dict) for every
        recognized step, and finishes with the grade and, when enabled,
        leaderboard sections. This keeps static UI attributes cleanly
        separated from persistent XBlock configuration.
        """
        ui_models = [UI_MODELS["submission"]]
        staff_assessment_required = False
        for assessment in self.valid_assessments:
            name = assessment["name"]
            if name == "staff-assessment":
                # Non-required staff steps don't get their own section here.
                if not assessment["required"]:
                    continue
                staff_assessment_required = True
            extra_attrs = UI_MODELS.get(name)
            if extra_attrs:
                ui_models.append(dict(assessment, **extra_attrs))

        # Even without a required staff step, show the staff section when a
        # staff assessment already exists for this submission.
        if not staff_assessment_required and self.staff_assessment_exists(self.submission_uuid):
            ui_models.append(UI_MODELS["staff-assessment"])

        ui_models.append(UI_MODELS["grade"])
        if self.leaderboard_show > 0:
            ui_models.append(UI_MODELS["leaderboard"])
        return ui_models

    @staticmethod
    def workbench_scenarios():
        """Canned scenarios for display in the workbench.

        These scenarios are only intended to be used for Workbench XBlock
        development.
        """
        scenario_files = [
            ("OpenAssessmentBlock File Upload: Images", 'static/xml/file_upload_image_only.xml'),
            ("OpenAssessmentBlock File Upload: PDF and Images", 'static/xml/file_upload_pdf_and_image.xml'),
            ("OpenAssessmentBlock File Upload: Custom File Types", 'static/xml/file_upload_custom.xml'),
            ("OpenAssessmentBlock File Upload: allow_file_upload compatibility", 'static/xml/file_upload_compat.xml'),
            ("OpenAssessmentBlock Unicode", 'static/xml/unicode.xml'),
            ("OpenAssessmentBlock Poverty Rubric", 'static/xml/poverty_rubric_example.xml'),
            ("OpenAssessmentBlock Leaderboard", 'static/xml/leaderboard.xml'),
            ("OpenAssessmentBlock Leaderboard with Custom File Type", 'static/xml/leaderboard_custom.xml'),
            ("OpenAssessmentBlock (Peer Only) Rubric", 'static/xml/poverty_peer_only_example.xml'),
            ("OpenAssessmentBlock (Self Only) Rubric", 'static/xml/poverty_self_only_example.xml'),
            ("OpenAssessmentBlock Censorship Rubric", 'static/xml/censorship_rubric_example.xml'),
            ("OpenAssessmentBlock Promptless Rubric", 'static/xml/promptless_rubric_example.xml'),
        ]
        return [(title, load(path)) for title, path in scenario_files]

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        """Instantiate XBlock object from runtime XML definition.

        Inherited by XBlock core.

        Parses the problem definition from *node*, validates the
        rubric/assessment configuration, then copies each parsed setting onto
        a freshly constructed block.
        """
        config = parse_from_xml(node)
        block = runtime.construct_xblock_from_class(cls, keys)

        # Validate the parsed rubric/assessment configuration before copying
        # it onto the block (presumably raises on invalid definitions — see
        # validator()).
        xblock_validator = validator(block, block._, strict_post_release=False)
        xblock_validator(
            create_rubric_dict(config['prompts'], config['rubric_criteria']),
            config['rubric_assessments'],
            submission_start=config['submission_start'],
            submission_due=config['submission_due'],
            leaderboard_show=config['leaderboard_show']
        )

        # Several of these assignments go through the backward-compatibility
        # property setters defined on this class (prompts, text_response,
        # file_upload_response, file_upload_type,
        # white_listed_file_types_string).
        block.rubric_criteria = config['rubric_criteria']
        block.rubric_feedback_prompt = config['rubric_feedback_prompt']
        block.rubric_feedback_default_text = config['rubric_feedback_default_text']
        block.rubric_assessments = config['rubric_assessments']
        block.submission_start = config['submission_start']
        block.submission_due = config['submission_due']
        block.title = config['title']
        block.prompts = config['prompts']
        block.prompts_type = config['prompts_type']
        block.text_response = config['text_response']
        block.file_upload_response = config['file_upload_response']
        block.allow_file_upload = config['allow_file_upload']
        block.file_upload_type = config['file_upload_type']
        block.white_listed_file_types_string = config['white_listed_file_types']
        block.allow_latex = config['allow_latex']
        block.leaderboard_show = config['leaderboard_show']
        block.group_access = config['group_access']

        return block

    @property
    def _(self):
        """Shortcut to the i18n service's ugettext translation function."""
        return self.runtime.service(self, 'i18n').ugettext

    @property
    def prompts(self):
        """
        Return the prompts.

        Initially a block had a single prompt which was saved as a simple
        string in the prompt field. Now prompts are saved as a serialized
        list of dicts in the same field. If prompt field contains valid json,
        parse and return it. Otherwise, assume it is a simple string prompt
        and return it in a list of dict.

        Returns:
            list of dict
        """
        # create_prompts_list handles both the legacy plain-string form and
        # the newer JSON-serialized list form of self.prompt.
        return create_prompts_list(self.prompt)

    @prompts.setter
    def prompts(self, value):
        """
        Serialize the prompts and save to the prompt field.

        Args:
            value (list of dict): The prompts to set.
        """
        if value is None:
            self.prompt = None
            return
        if len(value) == 1:
            # Single prompts are stored as a bare string for backwards
            # compatibility, until all code uses the prompts property
            # instead of the prompt field.
            self.prompt = value[0]['description']
            return
        self.prompt = json.dumps(value)

    @property
    def valid_assessments(self):
        """
        Return only the configured assessments whose type we recognize.

        Unrecognized assessment types can end up stored in the XBlock field
        (e.g. after rolling back code that introduced a new type); those are
        filtered out so the block degrades gracefully.

        Returns:
            list
        """
        if self.teams_enabled:
            recognized_types = self.VALID_ASSESSMENT_TYPES_FOR_TEAMS
        else:
            recognized_types = self.VALID_ASSESSMENT_TYPES

        recognized = [
            assessment for assessment in self.rubric_assessments
            if assessment.get('name') in recognized_types
        ]
        return update_assessments_format(copy.deepcopy(recognized))

    @property
    def assessment_steps(self):
        """Return the names of the configured, recognized assessment steps, in order."""
        return [asmnt['name'] for asmnt in self.valid_assessments]

    @lazy
    def rubric_criteria_with_labels(self):
        """
        Rubric criteria with a "label" guaranteed on every criterion/option.

        Backwards compatibility: "name" used to serve as both a user-facing
        label and a unique identifier for criteria and options. Now "name" is
        only a unique identifier and a separate "label" field is displayed.
        For definitions created before that change, the label defaults to the
        value of "name".

        The result of this call is cached, so it should NOT be used in a
        runtime that can modify the XBlock settings (in the LMS, settings are
        read-only).

        Returns:
            list of criteria dictionaries
        """
        criteria = copy.deepcopy(self.rubric_criteria)
        for criterion in criteria:
            criterion.setdefault('label', criterion['name'])
            for option in criterion['options']:
                option.setdefault('label', option['name'])
        return criteria

    def render_assessment(self, path, context_dict=None):
        """Render an Assessment Module's HTML.

        Given the template path for an assessment section, render it with the
        supplied context.

        Args:
            path (str): The path to the template used to render this HTML
                section.
            context_dict (dict): A dictionary of context variables used to
                populate this HTML section.

        Returns:
            (Response): A Response object with the generated HTML fragment,
                intended for AJAX calls that load it into a larger document.
        """
        template = get_template(path)
        html = template.render(context_dict or {})
        return Response(html, content_type='application/html', charset='UTF-8')

    def add_xml_to_node(self, node):
        """
        Serialize the XBlock to XML for exporting.

        Args:
            node: The XML node to populate with this block's definition.
        """
        serialize_content_to_xml(self, node)

    def render_error(self, error_msg):
        """
        Render an error message.

        Args:
            error_msg (unicode): The error message to display.

        Returns:
            Response: A response object with an HTML body.
        """
        template = get_template('openassessmentblock/oa_error.html')
        html = template.render({'error_msg': error_msg})
        return Response(html, content_type='application/html', charset='UTF-8')

    def is_closed(self, step=None, course_staff=None):
        """
        Checks if the question is closed.

        Determines if the start date is in the future or the end date has
            passed.  Optionally limited to a particular step in the workflow.

        Start/due dates do NOT apply to course staff, since course staff may need to get to
        the peer grading step AFTER the submission deadline has passed.
        This may not be necessary when we implement a grading interface specifically for course staff.

        Keyword Arguments:
            step (str): The step in the workflow to check.  Options are:
                None: check whether the problem as a whole is open.
                "submission": check whether the submission section is open.
                "peer-assessment": check whether the peer-assessment section is open.
                "self-assessment": check whether the self-assessment section is open.

            course_staff (bool): Whether to treat the user as course staff (disable start/due dates).
                If not specified, default to the current user's status.

        Returns:
            tuple of the form (is_closed, reason, start_date, due_date), where
                is_closed (bool): indicates whether the step is closed.
                reason (str or None): specifies the reason the step is closed ("start" or "due")
                start_date (datetime): is the start date of the step/problem.
                due_date (datetime): is the due date of the step/problem.

        Examples:
            >>> is_closed()
            False, None, datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
            datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
            >>> is_closed(step="submission")
            True, "due", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
            datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)
            >>> is_closed(step="self-assessment")
            True, "start", datetime.datetime(2014, 3, 27, 22, 7, 38, 788861),
            datetime.datetime(2015, 3, 27, 22, 7, 38, 788861)

        """
        # Collect (start, due) pairs for the submission step and each
        # configured assessment step, in workflow order.
        submission_range = (self.submission_start, self.submission_due)
        assessment_ranges = [
            (asmnt.get('start'), asmnt.get('due'))
            for asmnt in self.valid_assessments
        ]

        # Resolve unspecified dates and date strings to datetimes
        start, due, date_ranges = resolve_dates(
            self.start, self.due, [submission_range] + assessment_ranges, self._
        )

        # date_ranges[0] is the submission step; the assessment steps follow
        # in the same order as self.assessment_steps.
        open_range = (start, due)
        assessment_steps = self.assessment_steps
        if step == 'submission':
            open_range = date_ranges[0]
        elif step in assessment_steps:
            step_index = assessment_steps.index(step)
            open_range = date_ranges[1 + step_index]

        # Course staff always have access to the problem
        if course_staff is None:
            course_staff = self.is_course_staff
        if course_staff:
            return False, None, DISTANT_PAST, DISTANT_FUTURE

        # Beta testers may get an adjusted (earlier) start date.
        if self.is_beta_tester:
            beta_start = self._adjust_start_date_for_beta_testers(open_range[0])
            open_range = (beta_start, open_range[1])

        # Check if we are in the open date range
        now = dt.datetime.utcnow().replace(tzinfo=pytz.utc)

        if now < open_range[0]:
            return True, "start", open_range[0], open_range[1]
        elif now >= open_range[1]:
            return True, "due", open_range[0], open_range[1]
        return False, None, open_range[0], open_range[1]

    def get_waiting_details(self, status_details):
        """
        Return whether the learner is waiting on a grade, based on *status_details*.

        Args:
            status_details (dict): Maps assessment step names (such as "peer",
                "ai", "staff") to dicts containing a "graded" key. A falsy
                "graded" value means the step has not produced a grade yet.

        Returns:
            True if waiting for a grade from a peer, ai, or staff assessment,
            else False.

        Examples:
            >>> status_details = {
            ...     'peer': {'completed': None, 'graded': now},
            ...     'ai': {'completed': now, 'graded': None},
            ... }
            >>> self.get_waiting_details(status_details)
            True
        """
        # These steps can be submitter-complete while still lacking a grade.
        gradeable_steps = ("peer", "ai", "staff")
        return any(
            step in status_details and not status_details[step]["graded"]
            for step in gradeable_steps
        )

    def is_released(self, step=None):
        """
        Check if a question has been released.

        Keyword Arguments:
            step (str): The step in the workflow to check.
                None: check whether the problem as a whole is open.
                "submission": check whether the submission section is open.
                "peer-assessment": check whether the peer-assessment section is open.
                "self-assessment": check whether the self-assessment section is open.

        Returns:
            bool
        """
        # Default to "published" when the runtime has no modulestore (e.g.
        # workbench), since publish state can't be queried there.
        if hasattr(self.runtime, 'modulestore'):
            is_published = self.runtime.modulestore.has_published_version(self)
        else:
            is_published = True

        closed, reason, _start, _due = self.is_closed(step=step)
        released = is_published and (not closed or reason == 'due')
        if self.start:
            released = released and dt.datetime.now(pytz.UTC) > parse_date_value(self.start, self._)
        return released

    def get_assessment_module(self, mixin_name):
        """
        Get a configured assessment module by name.

        Args:
            mixin_name (str): The name of the mixin (e.g. "self-assessment" or "peer-assessment")

        Returns:
            dict, or None when no configured assessment matches.

        Example:
            >>> self.get_assessment_module('peer-assessment')
            {
                "name": "peer-assessment",
                "start": None,
                "due": None,
                "must_grade": 5,
                "must_be_graded_by": 3,
            }
        """
        return next(
            (asmnt for asmnt in self.valid_assessments if asmnt["name"] == mixin_name),
            None,
        )

    def publish_assessment_event(self, event_name, assessment, **kwargs):
        """
        Emit an analytics event for the peer assessment.

        Args:
            event_name (str): An identifier for this event type.
            assessment (dict): The serialized assessment model.
            **kwargs: Extra top-level fields merged into the event payload
                (on key collision they overwrite the standard fields).

        Returns:
            None

        """
        parts_list = []
        for part in assessment["parts"]:
            # Some assessment parts do not include point values,
            # only written feedback.  In this case, the assessment
            # part won't have an associated option.
            option_dict = None
            if part["option"] is not None:
                option_dict = {
                    "name": part["option"]["name"],
                    "points": part["option"]["points"],
                }

            # All assessment parts are associated with criteria
            parts_list.append({
                "option": option_dict,
                "criterion": {
                    "name": part["criterion"]["name"],
                    "points_possible": part["criterion"]["points_possible"],
                },
                "feedback": part["feedback"],
            })

        event_data = {
            "feedback": assessment["feedback"],
            "rubric": {
                "content_hash": assessment["rubric"]["content_hash"],
            },
            "scorer_id": assessment["scorer_id"],
            "score_type": assessment["score_type"],
            "scored_at": assessment["scored_at"],
            "submission_uuid": assessment["submission_uuid"],
            "parts": parts_list,
        }
        # Idiomatic merge instead of a manual key-by-key copy loop.
        event_data.update(kwargs)

        self.runtime.publish(self, event_name, event_data)

    @XBlock.json_handler
    def publish_event(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Publish the given data to an event.

        Expects key 'event_name' to be present in the data dictionary.
        """
        try:
            # pop() both retrieves the name and removes it, so the name is
            # not published as part of the event data.
            event_name = data.pop('event_name')
        except KeyError:
            logger.exception("Could not find the name of the event to be triggered.")
            return {'success': False}

        self.runtime.publish(self, event_name, data)
        return {'success': True}

    def get_real_user(self, anonymous_user_id):
        """
        Return the user associated with anonymous_user_id.

        Args:
            anonymous_user_id (str): the anonymous user id of the user

        Returns: the user model for the user if it can be identified.
            If the xblock service to convert to a real user fails,
            returns None and logs the error.
        """
        if not hasattr(self, "xmodule_runtime"):
            return None
        resolver = self.xmodule_runtime.get_real_user  # pylint: disable=no-member
        if resolver is None:
            return None
        user = resolver(anonymous_user_id)
        if not user:
            logger.exception(
                u"XBlock service could not find user for anonymous_user_id '{}'".format(anonymous_user_id)
            )
            return None
        return user

    def get_username(self, anonymous_user_id):
        """
        Return the username of the user associated with anonymous_user_id.

        Args:
            anonymous_user_id (str): the anonymous user id of the user

        Returns: the username if it can be identified, otherwise None.
        """
        user = self.get_real_user(anonymous_user_id)
        return user.username if user else None

    def _adjust_start_date_for_beta_testers(self, start):
        """
        Return the effective start date for a Beta tester.

        Beta testers get access ``days_early_for_beta`` days before the
        published start date; for everyone else the date is unchanged.
        """
        if hasattr(self, "xmodule_runtime"):
            days_early = getattr(self.xmodule_runtime, 'days_early_for_beta', 0)  # pylint: disable=no-member
            # None means "not a beta tester" — no adjustment in that case.
            if days_early is not None:
                return start - dt.timedelta(days_early)
        return start

    def get_xblock_id(self):
        """Return the usage id of this XBlock, rendered as a string."""
        usage_id = self.scope_ids.usage_id
        return str(usage_id)

    def _clean_data(self, data):
        """Strip all HTML markup from ``data`` and collapse whitespace runs to single spaces."""
        # Cleaner(tags=[], strip=True) removes every tag rather than escaping it.
        stripped = Cleaner(tags=[], strip=True).clean(data)
        return " ".join(re.split(r"\s+", stripped, flags=re.UNICODE)).strip()

    def index_dictionary(self):
        """
        Return dictionary prepared with module content and type for indexing.
        """
        # Key/value fields as a Python dict; values may be numeric / string
        # or dict. The default superclass implementation is an empty dict.
        xblock_body = super(OpenAssessmentBlock, self).index_dictionary()

        # self.prompt is a plain string when there is a single prompt;
        # with multiple prompts it holds a JSON list of prompt dicts.
        try:
            prompt = {
                "prompt_{}".format(index): self._clean_data(item.get("description", ""))
                for index, item in enumerate(json.loads(self.prompt))
            }
        except ValueError:
            # Not valid JSON: treat the raw string as the single prompt.
            prompt = {"prompt": self._clean_data(self.prompt)}

        content = dict(display_name=self.display_name, title=self.title, **prompt)

        if "content" in xblock_body:
            xblock_body["content"].update(content)
        else:
            xblock_body["content"] = content

        xblock_body["content_type"] = "ORA"

        return xblock_body
Пример #28
0
 class FieldTester(XBlock):
     """Test block for set - get test."""
     non_mutable = String(scope=Scope.settings)
     list_field = List(scope=Scope.settings)
     dict_field = Dict(scope=Scope.settings)
Пример #29
0
class GoogleDocumentBlock(XBlock, PublishEventMixin):
    """
    XBlock providing a google document embed link.

    NOTE(review): despite the name, most of the fields and handlers below
    deal with drawing a problem from a content library; only ``embed_code``
    and ``alt_text`` relate to the Google Drive embed itself.
    """

    @XBlock.json_handler
    def source_library_values(self, data, suffix=""):  # pylint: disable=unused-argument
        """
        Return a list of possible values for self.source_library_id.

        Returns:
            list: {"display_name": ..., "value": ...} dicts sorted by library
            name, always prefixed with a "No Library Selected" entry.
        """
        lib_tools = self.runtime.service(self, 'library_tools')
        user_perms = self.runtime.service(self, 'studio_user_permissions')
        all_libraries = [
            (key, name)
            for key, name in lib_tools.list_available_libraries()
            # Fixed: unicode() is a NameError on Python 3 (this file already
            # uses f-strings elsewhere); str() is the equivalent.
            if user_perms.can_read(key) or self.source_library_id == str(key)
        ]
        all_libraries.sort(key=lambda entry: entry[1])  # Sort by name
        if self.source_library_id and self.source_library_key not in [
                entry[0] for entry in all_libraries
        ]:
            # Keep an unreadable/removed library visible so the stored value
            # still shows up in the dropdown.
            all_libraries.append(
                (self.source_library_id, _(u"Invalid Library")))
        all_libraries = [(u"", _("No Library Selected"))] + all_libraries
        return [
            {"display_name": name, "value": str(key)}
            for key, name in all_libraries
        ]

    @XBlock.json_handler
    def source_problem_values(self, data, suffix=""):  # pylint: disable=unused-argument
        """
        Return list of possible problems for self.select_problems.

        Args:
            data (dict): must contain 'source_library_id'.
        """
        # Removed unused service lookups and dead commented-out code that
        # accumulated here; only the library_tools call is needed.
        library_selected = data['source_library_id']
        lib_tools = self.runtime.service(self, 'library_tools')
        return lib_tools.list_available_problems(library_selected)

    display_name = String(
        display_name=_("Display Name"),
        help=_("This name appears in the horizontal navigation at the top of the page."),
        scope=Scope.settings,
        default="Problem from Library",
    )
    embed_code = String(
        display_name=_("Embed Code"),
        help=_("Google provides an embed code for Drive documents. In the Google Drive document, "
               "from the File menu, select Publish to the Web. Modify settings as needed, click "
               "Publish, and copy the embed code into this field."),
        scope=Scope.settings,
        default=DEFAULT_EMBED_CODE,
    )
    alt_text = String(
        display_name=_("Alternative Text"),
        help=_("Alternative text describes an image and appears if the image is unavailable."),
        scope=Scope.settings,
        default="",
    )
    source_library_id = String(
        display_name=_("Library"),
        help=_("Select the library from which you want to draw content."),
        scope=Scope.settings,
        values_provider=lambda instance: instance.source_library_values(),
    )
    # Hidden field that stores the version of source_library when we last
    # pulled content from it.
    source_library_version = String(
        display_name=_("Library Version"),
        scope=Scope.settings,
    )
    mode = String(
        display_name=_("Mode"),
        help=_("Determines how content is drawn from the library"),
        default="random",
        values=[
            {
                "display_name": _("Choose n at random"),
                "value": "random"
            },
            # Future addition: Choose a new random set of n every time the student refreshes the block, for self tests
            # Future addition: manually selected blocks
        ],
        scope=Scope.settings,
    )
    source_problem_id = String(
        display_name=_("Problem"),
        help=_("Select the library from which you want to draw content."),
        scope=Scope.settings,
        values_provider=lambda instance: instance.source_problem_values(),
    )
    # List of (block_type, block_id) tuples used to record which
    # random/first set of matching blocks was selected per user.
    selected = List(
        default=[],
        scope=Scope.user_state,
    )
    has_children = True

    @property
    def source_library_key(self):
        """
        Convenience method to get the library ID as a LibraryLocator and not just a string.
        """
        return LibraryLocator.from_string(self.source_library_id)

    def _get_selected_child_blocks(self):
        """
        Generator returning XBlock instances of the children selected for the
        current user.
        """
        selected = {(c.block_type, c.block_id) for c in self.children}
        for block_type, block_id in selected:
            yield self.runtime.get_block(
                self.location.course_key.make_usage_key(block_type, block_id))

    def get_user_id(self):
        """
        Return the current user's platform id, or None if unavailable
        (e.g. when creating bok choy test fixtures).
        """
        user_service = self.runtime.service(self, 'user')
        if user_service:
            return user_service.get_current_user().opt_attrs.get(
                'edx-platform.user_id', None)
        return None

    # Context argument is specified for xblocks, but we are not using it herein
    def studio_view(self, context):  # pylint: disable=unused-argument
        """
        Editing view in Studio.
        """
        fragment = Fragment()
        # Need to access protected members of fields to get their default value
        default_name = self.fields['display_name']._default  # pylint: disable=protected-access,unsubscriptable-object
        fragment.add_content(
            RESOURCE_LOADER.render_template(DOCUMENT_EDIT_TEMPLATE, {
                'self': self,
                'defaultName': default_name,
            }))
        fragment.add_javascript(
            RESOURCE_LOADER.load_unicode('public/js/google_docs_edit.js'))
        fragment.add_css(
            RESOURCE_LOADER.load_unicode('public/css/google_edit.css'))

        fragment.initialize_js('GoogleDocumentEditBlock')

        return fragment

    # suffix argument is specified for xblocks, but we are not using it herein
    @XBlock.json_handler
    def studio_submit(self, submissions, suffix=''):  # pylint: disable=unused-argument
        """
        Change the settings for this XBlock given by the Studio user.
        """
        if not isinstance(submissions, dict):
            LOG.error("submissions object from Studio is not a dict - %r",
                      submissions)
            return {'result': 'error'}

        if 'display_name' in submissions:
            self.display_name = submissions['display_name']
        if 'library' in submissions:
            self.source_library_id = submissions['library']
        if 'problem' in submissions:
            self.source_problem_id = submissions['problem']

        # Reset children and the stored library version; librarydetail
        # re-imports the selected problem from the library below.
        self.children = []
        self.source_library_version = None

        lib_tools = self.runtime.service(self, 'library_tools')
        user_id = self.get_user_id()
        lib_tools.librarydetail(
            self,
            self.source_library_id,
            self.source_problem_id,
            user_id,
        )
        return {
            'result': 'success',
            'value': "test",
        }

    def student_view(self, context):  # pylint: disable=unused-argument
        """
        Player view, displayed to the student.
        """
        fragment = Fragment()
        contents = []
        child_context = {} if not context else copy(context)

        for child in self._get_selected_child_blocks():
            for displayable in child.displayable_items():
                rendered_child = displayable.render(STUDENT_VIEW,
                                                    child_context)
                fragment.add_fragment_resources(rendered_child)
                contents.append({
                    'id': text_type(displayable.location),
                    'content': rendered_child.content,
                })

        fragment.add_content(
            self.system.render_template(
                'vert_module.html', {
                    'items': contents,
                    'xblock_context': context,
                    'show_bookmark_button': False,
                    'watched_completable_blocks': set(),
                    # Fixed: this key previously contained stray embedded
                    # whitespace ('completion_        delay_ms'), which the
                    # template could never have matched.
                    'completion_delay_ms': None,
                }))

        return fragment

    # suffix argument is specified for xblocks, but we are not using it herein
    @XBlock.json_handler
    def check_url(self, data, suffix=''):  # pylint: disable=unused-argument,no-self-use
        """
        Checks that the given document url is accessible, and therefore assumed to be valid.

        Returns:
            dict: 'status_code' 400 when the url is missing or unreachable,
            otherwise the HTTP status of a HEAD request to the url.
        """
        try:
            test_url = data['url']
        except KeyError as ex:
            LOG.debug("URL not provided - %s", str(ex))
            return {
                'status_code': 400,
            }

        try:
            url_response = requests.head(test_url)
        # Catch wide range of request exceptions
        except requests.exceptions.RequestException as ex:
            LOG.debug("Unable to connect to %s - %s", test_url, str(ex))
            return {
                'status_code': 400,
            }

        return {
            'status_code': url_response.status_code,
        }

    @staticmethod
    def workbench_scenarios():
        """
        A canned scenario for display in the workbench.
        """
        return [("Google Docs scenario",
                 "<vertical_demo><google-document/></vertical_demo>")]
class LibrarySourcedBlock(StudioEditableXBlockMixin, EditableChildrenMixin,
                          XBlock):
    """
    Library Sourced Content XBlock

    Allows copying specific XBlocks from a Blockstore-based content library into
    a modulestore-based course. The selected blocks are copied and become
    children of this block.

    When we implement support for Blockstore-based courses, it's expected we'll
    use a different mechanism for importing library content into a course.
    """
    # Component title shown in Studio and to learners.
    display_name = String(
        help=_("The display name for this component."),
        default="Library Sourced Content",
        display_name=_("Display Name"),
        scope=Scope.content,
    )
    # Usage IDs of the library blocks to import as children of this block.
    source_block_ids = List(
        display_name=_("Library Blocks List"),
        help=_("Enter the IDs of the library XBlocks that you wish to use."),
        scope=Scope.content,
    )
    # Fields exposed by the StudioEditableXBlockMixin settings editor.
    editable_fields = ("display_name", "source_block_ids")
    has_children = True
    has_author_view = True
    resources_dir = 'assets/library_source_block'
    # Cap on len(source_block_ids), enforced in validate_field_data, because
    # importing library blocks is an expensive operation.
    MAX_BLOCKS_ALLOWED = 10

    def __str__(self):
        """Human-readable representation including the configured display name."""
        return "LibrarySourcedBlock: {}".format(self.display_name)

    def __init__(self, *args, **kwargs):
        """Initialize the block; unconfigured blocks advertise no children."""
        super().__init__(*args, **kwargs)
        if not self.source_block_ids:
            # Nothing has been imported from a library yet, so override the
            # class-level has_children = True for this instance.
            self.has_children = False

    def studio_view(self, context):
        """
        Render a form for editing this XBlock.
        """
        fragment = Fragment()
        # The React block picker is rendered through the static_content Mako def.
        static_content = ResourceLoader(
            'common.djangoapps.pipeline_mako').load_unicode(
                'templates/static_content.html')
        render_react = MakoTemplate(
            static_content, default_filters=[]).get_def('renderReact')
        react_content = render_react.render(
            component="LibrarySourcedBlockPicker",
            id="library-sourced-block-picker",
            props={'selectedXblocks': self.source_block_ids},
        )
        fragment.content = loader.render_django_template(
            'templates/library-sourced-block-studio-view.html',
            {
                'react_content': react_content,
                'save_url': self.runtime.handler_url(self, 'submit_studio_edits'),
            },
        )

        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/library_source_block.js'))
        fragment.initialize_js('LibrarySourceBlockStudioView')

        return fragment

    def author_view(self, context):
        """
        Renders the Studio preview view.
        """
        fragment = Fragment()
        # Isolate the context - without this there are weird bugs in Studio.
        render_context = copy(context) if context else {}
        # EditableChildrenMixin.render_children will render HTML that allows
        # instructors to make edits to the children.
        render_context['can_move'] = False
        self.render_children(render_context, fragment,
                             can_reorder=False, can_add=False)
        return fragment

    def student_view(self, context):
        """
        Renders the view that learners see: all children wrapped in one div.
        """
        result = Fragment()
        child_frags = self.runtime.render_children(self, context=context)
        result.add_resources(child_frags)
        result.add_content('<div class="library-sourced-content">')
        for child_fragment in child_frags:
            result.add_content(child_fragment.content)
        result.add_content('</div>')
        return result

    def validate_field_data(self, validation, data):
        """
        Validate this block's field data. Instead of checking fields like self.name, check the
        fields set on data, e.g. data.name. This allows the same validation method to be re-used
        for the studio editor.
        """
        # Importing library blocks is an expensive operation, so cap the count.
        if len(data.source_block_ids) > self.MAX_BLOCKS_ALLOWED:
            message = _("A maximum of {0} components may be added.").format(
                self.MAX_BLOCKS_ALLOWED)
            validation.add(ValidationMessage(ValidationMessage.ERROR, message))

    def validate(self):
        """
        Validates the state of this library_sourced_xblock Instance. This is the override of the general XBlock method,
        and it will also ask its superclass to validate.
        """
        validation = StudioValidation.copy(super().validate())

        if self.source_block_ids:
            return validation

        # No blocks selected yet: surface a "not configured" summary in Studio.
        validation.set_summary(
            StudioValidationMessage(
                StudioValidationMessage.NOT_CONFIGURED,
                _("No XBlock has been configured for this component. Use the editor to select the target blocks."
                  ),
                action_class='edit-button',
                action_label=_("Open Editor")))
        return validation

    @XBlock.handler
    def submit_studio_edits(self, data, suffix=''):
        """
        Save changes to this block, applying edits made in Studio, then
        replace our current children with the latest ones from the libraries.
        """
        response = super().submit_studio_edits(data, suffix)
        lib_tools = self.runtime.service(self, 'library_tools')
        try:
            lib_tools.import_from_blockstore(self, self.source_block_ids)
        except Exception as err:  # pylint: disable=broad-except
            log.exception(err)
            return Response(
                _("Importing Library Block failed - are the IDs valid and readable?"
                  ),
                status=400)
        return response