コード例 #1
0
 class FieldTester(XBlock):
     """Test XBlock for field access testing"""
     field_a = Integer(scope=Scope.settings)
     field_b = Integer(scope=Scope.content, default=10)
     field_c = Integer(scope=Scope.user_state, default=42)
     float_a = Float(scope=Scope.settings, default=5.8)
     float_b = Float(scope=Scope.settings)
コード例 #2
0
ファイル: test_xml_module.py プロジェクト: gopinath81/vmss
class TestFields(object):
    """Sample field definitions used to test which fields appear in editable_metadata_fields."""
    # Will be returned by editable_metadata_fields.
    max_attempts = Integer(scope=Scope.settings, default=1000, values={'min': 1, 'max': 10})
    # Will not be returned by editable_metadata_fields because filtered out by non_editable_metadata_fields.
    due = Date(scope=Scope.settings)
    # Will not be returned by editable_metadata_fields because is not Scope.settings.
    student_answers = Dict(scope=Scope.user_state)
    # Will be returned, and can override the inherited value from XModule.
    display_name = String(scope=Scope.settings, default='local default', display_name='Local Display Name',
                          help='local help')
    # Used for testing select type, effect of to_json method
    string_select = CrazyJsonString(
        scope=Scope.settings,
        default='default value',
        values=[{'display_name': 'first', 'value': 'value a'},
                {'display_name': 'second', 'value': 'value b'}]
    )
    # Shares the field definition object from InheritanceMixin.
    showanswer = InheritanceMixin.showanswer
    # Used for testing select type
    float_select = Float(scope=Scope.settings, default=.999, values=[1.23, 0.98])
    # Used for testing float type
    float_non_select = Float(scope=Scope.settings, default=.999, values={'min': 0, 'step': .3})
    # Used for testing that Booleans get mapped to select type
    boolean_select = Boolean(scope=Scope.settings)
    # Used for testing Lists
    list_field = List(scope=Scope.settings, default=[])
コード例 #3
0
class TimeLimitFields(object):
    """Field definitions for a time-limited (timed exam) module."""
    has_children = True

    # Per-student timer state.
    beginning_at = Float(help="The time this timer was started", scope=Scope.user_state)
    ending_at = Float(help="The time this timer will end", scope=Scope.user_state)
    # NOTE(review): field name keeps the historical misspelling "accomodation";
    # renaming it would change the stored field key, so it stays as-is.
    accomodation_code = String(help="A code indicating accommodations to be given the student", scope=Scope.user_state)
    # Author-configured settings.
    time_expired_redirect_url = String(help="Url to redirect users to after the timelimit has expired", scope=Scope.settings)
    duration = Float(help="The length of this timer", scope=Scope.settings)
    suppress_toplevel_navigation = Boolean(help="Whether the toplevel navigation should be suppressed when viewing this module", scope=Scope.settings)
コード例 #4
0
ファイル: lti_module.py プロジェクト: rayhooker/edx-platform
class LTIFields(object):
    """
    Fields to define and obtain LTI tool from provider are set here,
    except credentials, which should be set in course settings::

    `lti_id` is id to connect tool with credentials in course settings. It should not contain :: (double colon)
    `launch_url` is launch URL of tool.
    `custom_parameters` are additional parameters to navigate to proper book and book page.

    For example, for Vitalsource provider, `launch_url` should be
    *https://bc-staging.vitalsource.com/books/book*,
    and to get to proper book and book page, you should set custom parameters as::

        vbid=put_book_id_here
        book_location=page/put_page_number_here

    Default non-empty URL for `launch_url` is needed due to oauthlib demand (URL scheme should be presented)::

    https://github.com/idan/oauthlib/blob/master/oauthlib/oauth1/rfc5849/signature.py#L136
    """
    display_name = String(display_name="Display Name", help="Display name for this module", scope=Scope.settings, default="LTI")
    lti_id = String(help="Id of the tool", default='', scope=Scope.settings)
    launch_url = String(help="URL of the tool", default='http://www.example.com', scope=Scope.settings)
    custom_parameters = List(help="Custom parameters (vbid, book_location, etc..)", scope=Scope.settings)
    open_in_a_new_page = Boolean(help="Should LTI be opened in new page?", default=True, scope=Scope.settings)
    graded = Boolean(help="Grades will be considered in overall score.", default=False, scope=Scope.settings)
    weight = Float(
        help="Weight for student grades.",
        default=1.0,
        scope=Scope.settings,
        values={"min": 0},
    )
    has_score = Boolean(help="Does this LTI module have score?", default=False, scope=Scope.settings)
コード例 #5
0
class InheritanceMixin(XBlockMixin):
    """Field definitions for inheritable fields"""

    graded = Boolean(
        help="Whether this module contributes to the final course grade",
        default=False,
        scope=Scope.settings)

    # Default start is the Unix epoch, i.e. visible from the beginning of time.
    start = Date(help="Start time when this module is visible",
                 default=datetime.fromtimestamp(0, UTC),
                 scope=Scope.settings)
    due = Date(help="Date that this problem is due by", scope=Scope.settings)
    giturl = String(help="url root for course data git repository",
                    scope=Scope.settings)
    xqa_key = String(help="DO NOT USE", scope=Scope.settings)
    graceperiod = Timedelta(
        help=
        "Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings)
    showanswer = String(help="When to show the problem answer to the student",
                        scope=Scope.settings,
                        default="finished")
    rerandomize = String(help="When to rerandomize the problem",
                         default="never",
                         scope=Scope.settings)
    # NOTE(review): default None presumably means no early-access window for
    # beta users — confirm against the consumers of this field.
    days_early_for_beta = Float(
        help="Number of days early to show content to beta users",
        default=None,
        scope=Scope.settings)
    static_asset_path = String(
        help="Path to use for static assets - overrides Studio c4x://",
        scope=Scope.settings,
        default='')
コード例 #6
0
class InheritanceMixin(XBlockMixin):
    """Field definitions for inheritable fields."""

    graded = Boolean(
        help="Whether this module contributes to the final course grade",
        scope=Scope.settings,
        default=False,
    )
    # NOTE(review): far-future default start (2030) — presumably keeps content
    # hidden until an explicit start date is set; confirm.
    start = Date(help="Start time when this module is visible",
                 default=datetime(2030, 1, 1, tzinfo=UTC),
                 scope=Scope.settings)
    due = Date(
        help="Date that this problem is due by",
        scope=Scope.settings,
    )
    # Per-student override of the global due date (stored in user_state).
    extended_due = Date(
        help="Date that this problem is due by for a particular student. This "
        "can be set by an instructor, and will override the global due "
        "date if it is set to a date that is later than the global due "
        "date.",
        default=None,
        scope=Scope.user_state,
    )
    giturl = String(
        help="url root for course data git repository",
        scope=Scope.settings,
    )
    xqa_key = String(help="DO NOT USE", scope=Scope.settings)
    graceperiod = Timedelta(
        help=
        "Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings,
    )
    showanswer = String(
        help="When to show the problem answer to the student",
        scope=Scope.settings,
        default="finished",
    )
    rerandomize = String(
        help="When to rerandomize the problem",
        scope=Scope.settings,
        default="never",
    )
    days_early_for_beta = Float(
        help="Number of days early to show content to beta users",
        scope=Scope.settings,
        default=None,
    )
    static_asset_path = String(
        help="Path to use for static assets - overrides Studio c4x://",
        scope=Scope.settings,
        default='',
    )
    text_customization = Dict(
        help="String customization substitutions for particular locations",
        scope=Scope.settings,
    )
    use_latex_compiler = Boolean(help="Enable LaTeX templates?",
                                 default=False,
                                 scope=Scope.settings)
コード例 #7
0
    def test_float_from_NaN_is_nan(self):  # pylint: disable=invalid-name
        """Verify that Float parses the literal string 'NaN' into a float NaN.

        NaN compares unequal to everything, including itself, so a plain
        equality assertion cannot work — math.isnan is required.
        """
        parsed = Float().from_string('NaN')
        self.assertTrue(math.isnan(parsed))
コード例 #8
0
class PeerGradingFields(object):
    """Field definitions for the peer grading interface (translatable help/display strings)."""
    use_for_single_location = Boolean(
        display_name=_("Show Single Problem"),
        help=
        _('When True, only the single problem specified by "Link to Problem Location" is shown. '
          'When False, a panel is displayed with all problems available for peer grading.'
          ),
        default=False,
        scope=Scope.settings)
    link_to_location = Reference(
        display_name=_("Link to Problem Location"),
        help=
        _('The location of the problem being graded. Only used when "Show Single Problem" is True.'
          ),
        default="",
        scope=Scope.settings)
    graded = Boolean(
        display_name=_("Graded"),
        help=
        _('Defines whether the student gets credit for grading this problem. Only used when "Show Single Problem" is True.'
          ),
        default=False,
        scope=Scope.settings)
    due = Date(help=_("Due date that should be displayed."),
               scope=Scope.settings)
    # Per-student extension of the due date (stored in user_state).
    extended_due = Date(
        help=_(
            "Date that this problem is due by for a particular student. This "
            "can be set by an instructor, and will override the global due "
            "date if it is set to a date that is later than the global due "
            "date."),
        default=None,
        scope=Scope.user_state,
    )
    graceperiod = Timedelta(help=_("Amount of grace to give on the due date."),
                            scope=Scope.settings)
    student_data_for_location = Dict(
        help=_("Student data for a given peer grading problem."),
        scope=Scope.user_state)
    weight = Float(
        display_name=_("Problem Weight"),
        help=
        _("Defines the number of points each problem is worth. If the value is not set, each problem is worth one point."
          ),
        scope=Scope.settings,
        values={
            "min": 0,
            "step": ".1"
        },
        default=1)
    display_name = String(display_name=_("Display Name"),
                          help=_("Display name for this module"),
                          scope=Scope.settings,
                          default=_("Peer Grading Interface"))
    data = String(help=_("Html contents to display for this module"),
                  default='<peergrading></peergrading>',
                  scope=Scope.content)
コード例 #9
0
class QuestionMixin(EnumerableChildMixin):
    """
    An XBlock mixin for a child block that is a "Step".

    A step is a question that the user can answer (as opposed to a read-only child).
    """
    CAPTION = _(u"Question")

    has_author_view = True

    # Field definitions:
    name = String(
        scope=Scope.settings,  # Must be scope.settings, or the unique ID will change every time this block is edited
        default=UNIQUE_ID,
        display_name=_("Question ID (name)"),
        help=_("The ID of this question (required). Should be unique within this mentoring component."),
    )
    display_name = String(
        scope=Scope.content,
        default="",  # Blank will use 'Question x' - see display_name_with_default
        display_name=_("Question title"),
        help=_('Leave blank to use the default ("Question 1", "Question 2", etc.)'),
    )
    weight = Float(
        scope=Scope.content,
        default=1,
        enforce_type=True,
        display_name=_("Weight"),
        help=_("Defines the maximum total grade of this question."),
    )

    @lazy
    def siblings(self):
        """The step ids of this question's parent, i.e. our fellow steps."""
        return self.get_parent().step_ids

    def author_view(self, context):
        """Studio author view: the mentoring view with its header hidden."""
        ctx = {} if not context else context.copy()
        ctx['hide_header'] = True
        return self.mentoring_view(ctx)

    def author_preview_view(self, context):
        """Studio preview: the student view with its header hidden."""
        ctx = {} if not context else context.copy()
        ctx['hide_header'] = True
        return self.student_view(ctx)

    def assessment_step_view(self, context=None):
        """
        Identical to mentoring_view, except the DIV gets a different class
        (.xblock-v1-assessment_step_view). Assessments use that class to hide
        all the steps with CSS and to detect which children of mentoring are
        "Steps" and which are just decorative elements/instructions.
        """
        return self.mentoring_view(context)
コード例 #10
0
ファイル: lms_mixin.py プロジェクト: edxblocks/xblock-sqli
class LmsCompatibilityMixin(object):
    """
    Extra fields and methods used by LMS/Studio.
    """
    # Studio the default value for this field to show this XBlock
    # in the list of "Advanced Components"
    display_name = String(
        help="Display name",
        default="SQL Injection capture-the-flag",
        scope=Scope.settings,
    )

    start = DateTime(
        help="ISO-8601 formatted string representing the start date of this assignment.",
        default=None,
        scope=Scope.settings,
    )

    due = DateTime(
        help="ISO-8601 formatted string representing the due date of this assignment.",
        default=None,
        scope=Scope.settings,
    )

    weight = Float(
        display_name="Problem Weight",
        help="Defines the number of points this problem is worth.",
        values={"min": 0, "step": .1},
        default=1.0,
        scope=Scope.settings,
    )

    def has_dynamic_children(self):
        """Tell the LMS it can trust the static (cheaper) children listing.

        We never determine children dynamically — there are none — so the LMS
        does not need to instantiate this module to discover them.
        """
        return False

    @property
    def has_score(self):
        """Always True: this block is a scored (problem-like) type.

        For LMS Progress page/grades download purposes there is always a
        score, even if it is just 0 at the start.
        """
        return True

    def max_score(self):
        """The maximum raw score of this problem, i.e. its weight."""
        return self.weight
コード例 #11
0
class PeerGradingFields(object):
    """Field definitions for the peer grading interface."""
    use_for_single_location = Boolean(
        display_name="Show Single Problem",
        help='When True, only the single problem specified by "Link to Problem Location" is shown. '
             'When False, a panel is displayed with all problems available for peer grading.',
        default=False,
        scope=Scope.settings
    )
    link_to_location = String(
        display_name="Link to Problem Location",
        help='The location of the problem being graded. Only used when "Show Single Problem" is True.',
        default="",
        scope=Scope.settings
    )
    graded = Boolean(
        display_name="Graded",
        help='Defines whether the student gets credit for grading this problem. Only used when "Show Single Problem" is True.',
        default=False,
        scope=Scope.settings
    )
    due = Date(
        help="Due date that should be displayed.",
        scope=Scope.settings)
    graceperiod = Timedelta(
        help="Amount of grace to give on the due date.",
        scope=Scope.settings
    )
    # Per-student answers/grades for this problem (user_state scope).
    student_data_for_location = Dict(
        help="Student data for a given peer grading problem.",
        scope=Scope.user_state
    )
    weight = Float(
        display_name="Problem Weight",
        help="Defines the number of points each problem is worth. If the value is not set, each problem is worth one point.",
        scope=Scope.settings, values={"min": 0, "step": ".1"},
        default=1
    )
    display_name = String(
        display_name="Display Name",
        help="Display name for this module",
        scope=Scope.settings,
        default="Peer Grading Interface"
    )
    data = String(
        help="Html contents to display for this module",
        default='<peergrading></peergrading>',
        scope=Scope.content
    )
コード例 #12
0
ファイル: mixins.py プロジェクト: appsembler/appsembleredx
class CreditsMixin(XBlockMixin):
    """
    Mixin that allows an author to specify a credit provider and a number of credit
    units.
    """
    credit_provider = String(
        display_name=_("Credit Provider"),
        help=_("Name of the entity providing the credit units"),
        values=build_field_values(CREDIT_PROVIDERS),
        default=CREDIT_PROVIDERS_DEFAULT,
        scope=Scope.settings,
    )

    credits = Float(
        display_name=_("Credits"),
        help=_("Number of credits"),
        default=None,
        scope=Scope.settings,
    )

    credit_unit = String(
        display_name=_("Credit Unit"),
        help=_("Name of unit of credits; e.g., hours"),
        default=_("hours"),
        scope=Scope.settings,
    )

    accreditation_conferred = String(
        display_name=_("Accreditation Conferred"),
        help=ACCREDITATION_CONFERRED_HELP,
        default=None,
        scope=Scope.settings,
    )

    @classmethod
    def definition_from_xml(cls, definition, children):
        """Pass the parsed definition and children through unchanged."""
        # Fixes: was a Python-2 print statement (syntax error on Python 3),
        # and the debug message wrongly said "definition_to_xml".
        print("in CreditsMixin definition_from_xml")
        return definition, children

    def definition_to_xml(self, xml_object):
        """Serialize the credit fields onto ``xml_object`` as string attributes.

        Fields that are unset or falsy (None, 0, empty string) are skipped.
        """
        print("in CreditsMixin definition_to_xml")
        for field in ('credit_provider', 'credits', 'credit_unit',
                      'accreditation_conferred'):
            if getattr(self, field, None):
                xml_object.set(field, str(getattr(self, field)))
        return xml_object
コード例 #13
0
class InheritanceMixin(XBlockMixin):
    """Field definitions for inheritable fields."""

    graded = Boolean(
        help="Whether this module contributes to the final course grade",
        scope=Scope.settings,
        default=False,
    )
    start = Date(help="Start time when this module is visible",
                 default=DEFAULT_START_DATE,
                 scope=Scope.settings)
    due = Date(
        display_name=_("Due Date"),
        help=_("Enter the default date by which problems are due."),
        scope=Scope.settings,
    )
    visible_to_staff_only = Boolean(
        help=
        _("If true, can be seen only by course staff, regardless of start date."
          ),
        default=False,
        scope=Scope.settings,
    )
    course_edit_method = String(
        display_name=_("Course Editor"),
        help=
        _("Enter the method by which this course is edited (\"XML\" or \"Studio\")."
          ),
        default="Studio",
        scope=Scope.settings,
        deprecated=
        True  # Deprecated because user would not change away from Studio within Studio.
    )
    giturl = String(
        display_name=_("GIT URL"),
        help=_("Enter the URL for the course data GIT repository."),
        scope=Scope.settings)
    xqa_key = String(display_name=_("XQA Key"),
                     help=_("This setting is not currently supported."),
                     scope=Scope.settings,
                     deprecated=True)
    graceperiod = Timedelta(
        help=
        "Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings,
    )
    group_access = Dict(
        help=_(
            "Enter the ids for the content groups this problem belongs to."),
        scope=Scope.settings,
    )

    showanswer = String(
        display_name=_("Show Answer"),
        help=_(
            # Translators: DO NOT translate the words in quotes here, they are
            # specific words for the acceptable values.
            'Specify when the Show Answer button appears for each problem. '
            'Valid values are "always", "answered", "attempted", "closed", '
            '"finished", "past_due", "correct_or_past_due", and "never".'),
        scope=Scope.settings,
        default="finished",
    )

    show_correctness = String(
        display_name=_("Show Results"),
        help=_(
            # Translators: DO NOT translate the words in quotes here, they are
            # specific words for the acceptable values.
            'Specify when to show answer correctness and score to learners. '
            'Valid values are "always", "never", and "past_due".'),
        scope=Scope.settings,
        default="always",
    )

    rerandomize = String(
        display_name=_("Randomization"),
        help=_(
            # Translators: DO NOT translate the words in quotes here, they are
            # specific words for the acceptable values.
            'Specify the default for how often variable values in a problem are randomized. '
            'This setting should be set to "never" unless you plan to provide a Python '
            'script to identify and randomize values in most of the problems in your course. '
            'Valid values are "always", "onreset", "never", and "per_student".'
        ),
        scope=Scope.settings,
        default="never",
    )
    days_early_for_beta = Float(
        display_name=_("Days Early for Beta Users"),
        help=
        _("Enter the number of days before the start date that beta users can access the course."
          ),
        scope=Scope.settings,
        default=None,
    )
    static_asset_path = String(
        display_name=_("Static Asset Path"),
        help=
        _("Enter the path to use for files on the Files & Uploads page. This value overrides the Studio default, c4x://."
          ),
        scope=Scope.settings,
        default='',
    )
    use_latex_compiler = Boolean(
        display_name=_("Enable LaTeX Compiler"),
        help=
        _("Enter true or false. If true, you can use the LaTeX templates for HTML components and advanced Problem components."
          ),
        default=False,
        scope=Scope.settings)
    max_attempts = Integer(
        display_name=_("Maximum Attempts"),
        help=
        _("Enter the maximum number of times a student can try to answer problems. By default, Maximum Attempts is set to null, meaning that students have an unlimited number of attempts for problems. You can override this course-wide setting for individual problems. However, if the course-wide setting is a specific number, you cannot set the Maximum Attempts for individual problems to unlimited."
          ),
        values={"min": 0},
        scope=Scope.settings)
    matlab_api_key = String(
        display_name=_("Matlab API key"),
        help=
        _("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
          "This key is granted for exclusive use in this course for the specified duration. "
          "Do not share the API key with other courses. Notify MathWorks immediately "
          "if you believe the key is exposed or compromised. To obtain a key for your course, "
          "or to report an issue, please contact [email protected]"),
        scope=Scope.settings)
    # This is should be scoped to content, but since it's defined in the policy
    # file, it is currently scoped to settings.
    user_partitions = UserPartitionList(
        display_name=_("Group Configurations"),
        help=
        _("Enter the configurations that govern how students are grouped together."
          ),
        default=[],
        scope=Scope.settings)
    video_speed_optimizations = Boolean(
        display_name=_("Enable video caching system"),
        help=
        _("Enter true or false. If true, video caching will be used for HTML5 videos."
          ),
        default=True,
        scope=Scope.settings)
    video_auto_advance = Boolean(
        display_name=_("Enable video auto-advance"),
        help=
        _("Specify whether to show an auto-advance button in videos. If the student clicks it, when the last video in a unit finishes it will automatically move to the next unit and autoplay the first video."
          ),
        scope=Scope.settings,
        default=False)
    video_bumper = Dict(
        display_name=_("Video Pre-Roll"),
        help=
        _("Identify a video, 5-10 seconds in length, to play before course videos. Enter the video ID from "
          "the Video Uploads page and one or more transcript files in the following format: {format}. "
          "For example, an entry for a video with two transcripts looks like this: {example}"
          ),
        help_format_args=dict(
            format=
            '{"video_id": "ID", "transcripts": {"language": "/static/filename.srt"}}',
            example=
            ('{'
             '"video_id": "77cef264-d6f5-4cf2-ad9d-0178ab8c77be", '
             '"transcripts": {"en": "/static/DemoX-D01_1.srt", "uk": "/static/DemoX-D01_1_uk.srt"}'
             '}'),
        ),
        scope=Scope.settings)

    # Platform-wide default for the Reset button, read from the settings
    # object when the attribute is defined there, otherwise False.
    reset_key = "DEFAULT_SHOW_RESET_BUTTON"
    default_reset_button = getattr(settings, reset_key) if hasattr(
        settings, reset_key) else False
    show_reset_button = Boolean(
        display_name=_("Show Reset Button for Problems"),
        help=
        _("Enter true or false. If true, problems in the course default to always displaying a 'Reset' button. "
          "You can override this in each problem's settings. All existing problems are affected when "
          "this course-wide setting is changed."),
        scope=Scope.settings,
        default=default_reset_button)
    edxnotes = Boolean(
        display_name=_("Enable Student Notes"),
        help=
        _("Enter true or false. If true, students can use the Student Notes feature."
          ),
        default=False,
        scope=Scope.settings)
    edxnotes_visibility = Boolean(
        display_name="Student Notes Visibility",
        help=_(
            "Indicates whether Student Notes are visible in the course. "
            "Students can also show or hide their notes in the courseware."),
        default=True,
        scope=Scope.user_info)

    in_entrance_exam = Boolean(
        display_name=_("Tag this module as part of an Entrance Exam section"),
        help=_(
            "Enter true or false. If true, answer submissions for problem modules will be "
            "considered in the Entrance Exam scoring/gating algorithm."),
        scope=Scope.settings,
        default=False)

    self_paced = Boolean(
        display_name=_('Self Paced'),
        help=
        _('Set this to "true" to mark this course as self-paced. Self-paced courses do not have '
          'due dates for assignments, and students can progress through the course at any rate before '
          'the course ends.'),
        default=False,
        scope=Scope.settings)
コード例 #14
0
class InheritanceMixin(XBlockMixin):
    """Field definitions for inheritable fields."""

    graded = Boolean(
        help="Whether this module contributes to the final course grade",
        scope=Scope.settings,
        default=False,
    )
    # NOTE(review): far-future default start (2030) — presumably keeps content
    # hidden until an explicit start date is set; confirm.
    start = Date(help="Start time when this module is visible",
                 default=datetime(2030, 1, 1, tzinfo=UTC),
                 scope=Scope.settings)
    due = Date(
        display_name=_("Due Date"),
        help=_("Enter the default date by which problems are due."),
        scope=Scope.settings,
    )
    # Per-student override of the global due date (stored in user_state).
    extended_due = Date(
        help="Date that this problem is due by for a particular student. This "
        "can be set by an instructor, and will override the global due "
        "date if it is set to a date that is later than the global due "
        "date.",
        default=None,
        scope=Scope.user_state,
    )
    lti_enabled = Boolean(
        display_name=_("Course as LTI Tool Provider"),
        help=
        _("Enter true or false. If true, subsections in the course can act as LTI tool providers."
          ),
        default=False,
        scope=Scope.settings,
    )
    visible_to_staff_only = Boolean(
        help=
        _("If true, can be seen only by course staff, regardless of start date."
          ),
        default=False,
        scope=Scope.settings,
    )
    group_access = Dict(
        help=
        "A dictionary that maps which groups can be shown this block. The keys "
        "are group configuration ids and the values are a list of group IDs. "
        "If there is no key for a group configuration or if the list of group IDs "
        "is empty then the block is considered visible to all. Note that this "
        "field is ignored if the block is visible_to_staff_only.",
        default={},
        scope=Scope.settings,
    )
    course_edit_method = String(
        display_name=_("Course Editor"),
        help=
        _("Enter the method by which this course is edited (\"XML\" or \"Studio\")."
          ),
        default="Studio",
        scope=Scope.settings,
        deprecated=
        True  # Deprecated because user would not change away from Studio within Studio.
    )
    giturl = String(
        display_name=_("GIT URL"),
        help=_("Enter the URL for the course data GIT repository."),
        scope=Scope.settings)
    xqa_key = String(display_name=_("XQA Key"),
                     help=_("This setting is not currently supported."),
                     scope=Scope.settings,
                     deprecated=True)
    annotation_storage_url = String(help=_(
        "Enter the location of the annotation storage server. The textannotation, videoannotation, and imageannotation advanced modules require this setting."
    ),
                                    scope=Scope.settings,
                                    default=
                                    "http://your_annotation_storage.com",
                                    display_name=_(
                                        "URL for Annotation Storage"))
    annotation_token_secret = String(help=_(
        "Enter the secret string for annotation storage. The textannotation, videoannotation, and imageannotation advanced modules require this string."
    ),
                                     scope=Scope.settings,
                                     default=
                                     "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
                                     display_name=_(
                                         "Secret Token String for Annotation"))
    graceperiod = Timedelta(
        help=
        "Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings,
    )
    showanswer = String(
        display_name=_("Show Answer"),
        help=
        _("Specify when the Show Answer button appears for each problem. Valid values are \"always\", \"answered\", \"attempted\", \"closed\", \"finished\", \"past_due\", and \"never\"."
          ),
        scope=Scope.settings,
        default="finished",
    )
    rerandomize = String(
        display_name=_("Randomization"),
        help=
        _("Specify how often variable values in a problem are randomized when a student loads the problem. Valid values are \"always\", \"onreset\", \"never\", and \"per_student\". This setting only applies to problems that have randomly generated numeric values."
          ),
        scope=Scope.settings,
        default="never",
    )
    days_early_for_beta = Float(
        display_name=_("Days Early for Beta Users"),
        help=
        _("Enter the number of days before the start date that beta users can access the course."
          ),
        scope=Scope.settings,
        default=None,
    )
    static_asset_path = String(
        display_name=_("Static Asset Path"),
        help=
        _("Enter the path to use for files on the Files & Uploads page. This value overrides the Studio default, c4x://."
          ),
        scope=Scope.settings,
        default='',
    )
    text_customization = Dict(
        display_name=_("Text Customization"),
        help=_(
            "Enter string customization substitutions for particular locations."
        ),
        scope=Scope.settings,
    )
    use_latex_compiler = Boolean(
        display_name=_("Enable LaTeX Compiler"),
        help=
        _("Enter true or false. If true, you can use the LaTeX templates for HTML components and advanced Problem components."
          ),
        default=False,
        scope=Scope.settings)
    max_attempts = Integer(
        display_name=_("Maximum Attempts"),
        help=
        _("Enter the maximum number of times a student can try to answer problems. By default, Maximum Attempts is set to null, meaning that students have an unlimited number of attempts for problems. You can override this course-wide setting for individual problems. However, if the course-wide setting is a specific number, you cannot set the Maximum Attempts for individual problems to unlimited."
          ),
        values={"min": 0},
        scope=Scope.settings)
    matlab_api_key = String(
        display_name=_("Matlab API key"),
        help=
        _("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
          "This key is granted for exclusive use in this course for the specified duration. "
          "Do not share the API key with other courses. Notify MathWorks immediately "
          "if you believe the key is exposed or compromised. To obtain a key for your course, "
          "or to report an issue, please contact [email protected]"),
        scope=Scope.settings)
    # This is should be scoped to content, but since it's defined in the policy
    # file, it is currently scoped to settings.
    user_partitions = UserPartitionList(
        display_name=_("Group Configurations"),
        help=
        _("Enter the configurations that govern how students are grouped together."
          ),
        default=[],
        scope=Scope.settings)
    video_speed_optimizations = Boolean(
        display_name=_("Enable video caching system"),
        help=
        _("Enter true or false. If true, video caching will be used for HTML5 videos."
          ),
        default=True,
        scope=Scope.settings)

    # Platform-wide default for the Reset button, read from the settings
    # object when the attribute is defined there, otherwise False.
    reset_key = "DEFAULT_SHOW_RESET_BUTTON"
    default_reset_button = getattr(settings, reset_key) if hasattr(
        settings, reset_key) else False
    show_reset_button = Boolean(
        display_name=_("Show Reset Button for Problems"),
        help=
        _("Enter true or false. If true, problems in the course default to always displaying a 'Reset' button. You can "
          "override this in each problem's settings. All existing problems are affected when this course-wide setting is changed."
          ),
        scope=Scope.settings,
        default=default_reset_button)
コード例 #15
0
class MentoringBlock(XBlockWithLightChildren, StepParentMixin):
    """
    An XBlock providing mentoring capabilities

    Composed of text, answers input fields, and a set of MRQ/MCQ with advices.
    A set of conditions on the provided answers and MCQ/MRQ choices will determine if the
    student is a) provided mentoring advices and asked to alter his answer, or b) is given the
    ok to continue.
    """

    @staticmethod
    def is_default_xml_content(value):
        """Return True when `value` is still the unmodified default XML content."""
        return _is_default_xml_content(value)

    attempted = Boolean(help="Has the student attempted this mentoring step?",
                        default=False, scope=Scope.user_state)
    completed = Boolean(help="Has the student completed this mentoring step?",
                        default=False, scope=Scope.user_state)
    next_step = String(help="url_name of the next step the student must complete (global to all blocks)",
                       default='mentoring_first', scope=Scope.preferences)
    followed_by = String(help="url_name of the step after the current mentoring block in workflow",
                         default=None, scope=Scope.content)
    url_name = String(help="Name of the current step, used for URL building",
                      default='mentoring-default', scope=Scope.content)
    enforce_dependency = Boolean(help="Should the next step be the current block to complete?",
                                 default=False, scope=Scope.content, enforce_type=True)
    display_submit = Boolean(help="Allow submission of the current block?", default=True,
                             scope=Scope.content, enforce_type=True)
    xml_content = String(help="XML content", default=_default_xml_content, scope=Scope.content)
    weight = Float(help="Defines the maximum total grade of the block.",
                   default=1, scope=Scope.content, enforce_type=True)
    num_attempts = Integer(help="Number of attempts a user has answered for this questions",
                           default=0, scope=Scope.user_state, enforce_type=True)
    max_attempts = Integer(help="Number of max attempts for this questions", default=0,
                           scope=Scope.content, enforce_type=True)
    mode = String(help="Mode of the mentoring. 'standard' or 'assessment'",
                  default='standard', scope=Scope.content)
    step = Integer(help="Keep track of the student assessment progress.",
                   default=0, scope=Scope.user_state, enforce_type=True)
    student_results = List(help="Store results of student choices.", default=[],
                           scope=Scope.user_state)
    # Bug fix: the keyword was previously spelled `Scope=Scope.content`
    # (capital S), so the intended scope was silently ignored and a bogus
    # `Scope` keyword argument was passed to the field constructor.
    extended_feedback = Boolean(help="Show extended feedback details when all attempts are used up.",
                                default=False, scope=Scope.content)
    display_name = String(help="Display name of the component", default="Mentoring XBlock",
                          scope=Scope.settings)
    icon_class = 'problem'
    has_score = True

    # Valid values for the `mode` field.
    MENTORING_MODES = ('standard', 'assessment')

    # Child block types that are rendered outside the regular step flow.
    FLOATING_BLOCKS = (TitleBlock, MentoringMessageBlock, SharedHeaderBlock)

    FIELDS_TO_INIT = ('xml_content',)

    @property
    def is_assessment(self):
        """Whether this block is running in 'assessment' mode (vs. 'standard')."""
        current_mode = self.mode
        return current_mode == 'assessment'

    def get_question_number(self, question_id):
        """
        Return the step number of the child question named `question_id`.

        Raises ValueError when no step with that name exists.
        """
        for candidate in self.get_children_objects():
            if not hasattr(candidate, 'step_number'):
                continue
            if candidate.name == question_id:
                return candidate.step_number
        raise ValueError("Question ID in answer set not a step of this Mentoring Block!")

    def answer_mapper(self, answer_status):
        """
        Build a JSON-dumpable list (with readable key names) of every stored
        student answer whose status equals `answer_status`.
        """
        mapped = []
        for name, details in self.student_results:
            if details['status'] != answer_status:
                continue
            try:
                entry = {
                    'number': self.get_question_number(name),
                    'id': name,
                    'details': details,
                }
            except ValueError:
                # Answer refers to a question that is no longer a step: skip it.
                continue
            mapped.append(entry)
        return mapped

    @property
    def score(self):
        """Compute the student score taking into account the light child weight."""
        weights_total = sum(float(child.weight) for child in self.steps)
        if weights_total == 0:
            # No weighted steps: the score is trivially zero.
            return Score(0, 0, [], [], [])
        children_by_name = {child.name: child for child in self.steps}
        earned = 0
        for result_name, result_details in self.student_results:
            child = children_by_name.get(result_name)
            if child:
                earned += result_details['score'] * child.weight
        raw = earned / weights_total
        return Score(
            raw,
            int(round(raw * 100)),
            self.answer_mapper(CORRECT),
            self.answer_mapper(INCORRECT),
            self.answer_mapper(PARTIAL),
        )

    @property
    def assessment_message(self):
        """Review message shown on assessment completion; None once attempts run out."""
        if self.max_attempts_reached:
            return None
        return self.get_message_html('on-assessment-review')

    def show_extended_feedback(self):
        """Extended feedback is shown only when enabled and attempts are exhausted."""
        if not self.extended_feedback:
            return False
        return self.max_attempts_reached

    def feedback_dispatch(self, target_data, stringify):
        """
        Return `target_data` (JSON-encoded when `stringify` is true), or None
        when extended feedback must not be shown.
        """
        if not self.show_extended_feedback():
            return None
        return json.dumps(target_data) if stringify else target_data

    def correct_json(self, stringify=True):
        """Dispatch the correct-answer details for extended feedback."""
        data = self.score.correct
        return self.feedback_dispatch(data, stringify)

    def incorrect_json(self, stringify=True):
        """Dispatch the incorrect-answer details for extended feedback."""
        data = self.score.incorrect
        return self.feedback_dispatch(data, stringify)

    def partial_json(self, stringify=True):
        """Dispatch the partially-correct-answer details for extended feedback."""
        data = self.score.partially_correct
        return self.feedback_dispatch(data, stringify)

    def student_view(self, context):
        """
        Render the student-facing view: migrate legacy data, clamp the step
        counter, render all non-floating children, and attach JS/CSS assets.
        """
        # Migrate stored data if necessary
        self.migrate_fields()

        # Validate self.step: clamp it to the number of real (non-floating) steps.
        num_steps = len([child for child in self.get_children_objects() if not isinstance(child, self.FLOATING_BLOCKS)])
        if self.step > num_steps:
            self.step = num_steps

        fragment, named_children = self.get_children_fragment(
            context, view_name='mentoring_view',
            not_instance_of=self.FLOATING_BLOCKS,
        )

        fragment.add_content(loader.render_template('templates/html/mentoring.html', {
            'self': self,
            'named_children': named_children,
            'missing_dependency_url': self.has_missing_dependency and self.next_step_url,
        }))
        fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/mentoring.css'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/vendor/underscore-min.js'))

        # Assessment and standard modes load different front-end controllers.
        js_view = 'mentoring_assessment_view.js' if self.is_assessment else 'mentoring_standard_view.js'
        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/'+js_view))

        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/mentoring.js'))
        # Client-side Underscore templates used by mentoring.js.
        fragment.add_resource(loader.load_unicode('templates/html/mentoring_attempts.html'), "text/html")
        fragment.add_resource(loader.load_unicode('templates/html/mentoring_grade.html'), "text/html")
        fragment.add_resource(loader.load_unicode('templates/html/mentoring_review_questions.html'), "text/html")

        fragment.initialize_js('MentoringBlock')

        # Blocks without a submit button count as completed on view.
        if not self.display_submit:
            self.runtime.publish(self, 'progress', {})

        return fragment

    def migrate_fields(self):
        """
        Migrate data stored in the fields, when a format change breaks
        backward-compatibility with previous data formats.
        """
        # Partial answers replaced the boolean `completed` flag with a textual
        # `status` value in `self.student_results`.
        results = self.student_results
        if results and 'completed' in results[0][1]:
            for _name, details in results:
                details['status'] = CORRECT if details['completed'] else INCORRECT
                del details['completed']

    @property
    def additional_publish_event_data(self):
        """Extra fields merged into every event published by this block."""
        event_data = {}
        event_data['user_id'] = self.scope_ids.user_id
        event_data['component_id'] = self.url_name
        return event_data

    @property
    def title(self):
        """
        Returns the title child, or None if there is none.
        """
        titles = (child for child in self.get_children_objects()
                  if isinstance(child, TitleBlock))
        return next(titles, None)

    @property
    def header(self):
        """
        Return the shared header child, or None if there is none.
        """
        headers = (child for child in self.get_children_objects()
                   if isinstance(child, SharedHeaderBlock))
        return next(headers, None)

    @property
    def has_missing_dependency(self):
        """
        Returns True if the student needs to complete another step before being
        able to complete the current one, and False otherwise.
        """
        if not self.enforce_dependency:
            return False
        if self.completed:
            return False
        return self.next_step != self.url_name

    @property
    def next_step_url(self):
        """
        Returns the URL of the next step's page.
        """
        return '/jump_to_id/{step}'.format(step=self.next_step)

    @XBlock.json_handler
    def get_results(self, queries, suffix=''):
        """
        Gets detailed results in the case of extended feedback.

        It may be a good idea to eventually have this function get results
        in the general case instead of loading them in the template in the future,
        and only using it for extended feedback situations.

        Right now there are two ways to get results-- through the template upon loading up
        the mentoring block, or after submission of an AJAX request like in
        submit or get_results here.
        """
        results = []
        # Refuse to leak details unless extended feedback is currently allowed.
        if not self.show_extended_feedback():
            return {
                'results': [],
                'error': 'Extended feedback results cannot be obtained.'
            }
        completed = True
        choices = dict(self.student_results)
        step = self.step
        # Only one child should ever be of concern with this method.
        for child in self.get_children_objects():
            if child.name and child.name in queries:
                results = [child.name, child.get_results(choices[child.name])]
                # Children may have their own definition of 'completed' which can vary from the general case
                # of the whole mentoring block being completed. This is because in standard mode, all children
                # must be correct to complete the block. In assessment mode with extended feedback, completion
                # happens when you're out of attempts, no matter how you did.
                completed = choices[child.name]['status']
                break

        # The 'completed' message should always be shown in this case, since no more attempts are available.
        message = self.get_message(True)

        return {
            'results': results,
            'completed': completed,
            'attempted': self.attempted,
            'message': message,
            'step': step,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
        }

    def get_message(self, completed):
        """Pick the feedback message HTML matching the current completion state."""
        if self.max_attempts_reached:
            message_type = 'max_attempts_reached'
        elif completed:
            message_type = 'completed'
        else:
            message_type = 'incomplete'
        return self.get_message_html(message_type)

    @XBlock.json_handler
    def submit(self, submissions, suffix=''):
        """
        AJAX handler for standard-mode submissions: grade each submitted child,
        update completion/attempt state, publish the grade, and return a summary.
        Assessment mode is delegated to `handleAssessmentSubmit`.
        """
        log.info(u'Received submissions: {}'.format(submissions))
        self.attempted = True

        if self.is_assessment:
            return self.handleAssessmentSubmit(submissions, suffix)

        submit_results = []
        completed = True
        for child in self.get_children_objects():
            if child.name and child.name in submissions:
                submission = submissions[child.name]
                child_result = child.submit(submission)
                submit_results.append([child.name, child_result])
                child.save()
                # Standard mode: the block is complete only if ALL children are correct.
                completed = completed and (child_result['status'] == CORRECT)

        message = self.get_message(completed)

        # Once it has been completed once, keep completion even if user changes values
        if self.completed:
            completed = True

        # server-side check to not set completion if the max_attempts is reached
        if self.max_attempts_reached:
            completed = False

        if self.has_missing_dependency:
            completed = False
            message = 'You need to complete all previous steps before being able to complete the current one.'
        elif completed and self.next_step == self.url_name:
            self.next_step = self.followed_by

        # Once it was completed, lock score
        if not self.completed:
            # save user score and results
            # (pop in a loop so the XBlock List field registers the mutation)
            while self.student_results:
                self.student_results.pop()
            for result in submit_results:
                self.student_results.append(result)

            self.runtime.publish(self, 'grade', {
                'value': self.score.raw,
                'max_value': 1,
            })

        if not self.completed and self.max_attempts > 0:
            self.num_attempts += 1

        self.completed = completed is True

        raw_score = self.score.raw

        self.publish_event_from_dict('xblock.mentoring.submitted', {
            'num_attempts': self.num_attempts,
            'submitted_answer': submissions,
            'grade': raw_score,
        })

        return {
            'results': submit_results,
            'completed': self.completed,
            'attempted': self.attempted,
            'message': message,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts
        }

    def handleAssessmentSubmit(self, submissions, suffix):
        """
        Handle a submission in assessment mode: record the answer for the
        current step, advance the step counter, and on the last step publish
        the final grade and count the attempt.
        """
        completed = False
        current_child = None
        children = [child for child in self.get_children_objects()
                    if not isinstance(child, self.FLOATING_BLOCKS)]

        assessment_message = None

        for child in children:
            if child.name and child.name in submissions:
                submission = submissions[child.name]

                # Assessment mode doesn't allow to modify answers
                # This will get the student back at the step he should be
                current_child = child
                step = children.index(child)
                if self.step > step or self.max_attempts_reached:
                    step = self.step
                    completed = False
                    break

                self.step = step + 1

                child_result = child.submit(submission)
                # Tips are withheld in assessment mode.
                if 'tips' in child_result:
                    del child_result['tips']
                self.student_results.append([child.name, child_result])
                child.save()
                completed = child_result['status']

        event_data = {}

        score = self.score

        if current_child == self.steps[-1]:
            log.info(u'Last assessment step submitted: {}'.format(submissions))
            # Only publish the grade while attempts remain.
            if not self.max_attempts_reached:
                self.runtime.publish(self, 'grade', {
                    'value': score.raw,
                    'max_value': 1,
                    'score_type': 'proficiency',
                })
                event_data['final_grade'] = score.raw
                assessment_message = self.assessment_message

            self.num_attempts += 1
            self.completed = True

        event_data['exercise_id'] = current_child.name
        event_data['num_attempts'] = self.num_attempts
        event_data['submitted_answer'] = submissions

        self.publish_event_from_dict('xblock.mentoring.assessment.submitted', event_data)

        return {
            'completed': completed,
            'attempted': self.attempted,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
            'step': self.step,
            'score': score.percentage,
            'correct_answer': len(score.correct),
            'incorrect_answer': len(score.incorrect),
            'partially_correct_answer': len(score.partially_correct),
            'extended_feedback': self.show_extended_feedback() or '',
            'correct': self.correct_json(stringify=False),
            'incorrect': self.incorrect_json(stringify=False),
            'partial': self.partial_json(stringify=False),
            'assessment_message': assessment_message,
        }

    @XBlock.json_handler
    def try_again(self, data, suffix=''):
        """
        Reset the assessment state so the student can start over, unless the
        attempt limit has already been reached.
        """
        if self.max_attempts_reached:
            return {
                'result': 'error',
                'message': 'max attempts reached'
            }

        # Reset progress.
        self.step = 0
        self.completed = False

        # Empty the list in place so the XBlock field change is tracked.
        while self.student_results:
            self.student_results.pop()

        return {
            'result': 'success'
        }

    @property
    def max_attempts_reached(self):
        """True when attempts are limited and the student has used them all."""
        if self.max_attempts <= 0:
            # 0 (or negative) means unlimited attempts.
            return False
        return self.num_attempts >= self.max_attempts

    def get_message_fragment(self, message_type):
        """Render the MentoringMessageBlock child of the given type, or None."""
        for candidate in self.get_children_objects():
            if not isinstance(candidate, MentoringMessageBlock):
                continue
            if candidate.type == message_type:
                rendered = self.render_child(candidate, 'mentoring_view', {})
                return self.fragment_text_rewriting(rendered)

    def get_message_html(self, message_type):
        """HTML body for the message of the given type, or '' when absent."""
        fragment = self.get_message_fragment(message_type)
        return fragment.body_html() if fragment else ''

    def studio_view(self, context):
        """
        Editing view in Studio.
        """
        fragment = Fragment()
        template_context = {
            'self': self,
            'xml_content': self.xml_content,
        }
        fragment.add_content(loader.render_template(
            'templates/html/mentoring_edit.html', template_context))
        edit_js = self.runtime.local_resource_url(self, 'public/js/mentoring_edit.js')
        fragment.add_javascript_url(edit_js)
        edit_css = self.runtime.local_resource_url(self, 'public/css/mentoring_edit.css')
        fragment.add_css_url(edit_css)

        fragment.initialize_js('MentoringEditBlock')

        return fragment

    @XBlock.json_handler
    def studio_submit(self, submissions, suffix=''):
        """
        Studio save handler: validate the submitted XML and store it in
        `xml_content` when it is well-formed and uses a known mentoring mode.
        Returns a result dict with 'result' and, on error, a 'message'.
        """
        log.info(u'Received studio submissions: {}'.format(submissions))

        xml_content = submissions['xml_content']
        try:
            content = etree.parse(StringIO(xml_content))
        except etree.XMLSyntaxError as e:
            response = {
                'result': 'error',
                # Bug fix: use str(e) instead of e.message --
                # BaseException.message does not exist on Python 3.
                'message': str(e)
            }

        else:
            success = True
            root = content.getroot()
            if 'mode' in root.attrib:
                if root.attrib['mode'] not in self.MENTORING_MODES:
                    response = {
                        'result': 'error',
                        'message': "Invalid mentoring mode: should be 'standard' or 'assessment'"
                    }
                    success = False
                elif root.attrib['mode'] == 'assessment' and 'max_attempts' not in root.attrib:
                    # assessment has a default of 2 max_attempts
                    root.attrib['max_attempts'] = '2'

            if success:
                response = {
                    'result': 'success',
                }
                self.xml_content = etree.tostring(content, pretty_print=True)

        log.debug(u'Response from Studio: {}'.format(response))
        return response

    @property
    def url_name_with_default(self):
        """
        Ensure the `url_name` is set to a unique, non-empty value.
        This should ideally be handled by Studio, but we need to declare the attribute
        to be able to use it from the workbench, and when this happen Studio doesn't set
        a unique default value - this property gives either the set value, or if none is set
        a randomized default value.
        """
        current = self.url_name
        if current != 'mentoring-default':
            return current
        return 'mentoring-{}'.format(uuid.uuid4())

    @staticmethod
    def workbench_scenarios():
        """
        Scenarios displayed by the workbench. Load them from external (private) repository.
        """
        scenarios_path = 'templates/xml'
        return loader.load_scenarios_from_path(scenarios_path)
# ---- Code example #16 ----
class HastexoXBlock(XBlock, XBlockWithSettingsMixin, ScorableXBlockMixin,
                    StudioEditableXBlockMixin,
                    StudioContainerWithNestedXBlocksMixin):
    """
    Provides lab environments and an SSH connection to them.

    """
    CATEGORY = "hastexo"
    STUDIO_LABEL = "hastexo XBlock"

    # Settings with defaults.
    display_name = String(default="hastexo XBlock",
                          scope=Scope.settings,
                          help="Title to display")
    weight = Float(default=1,
                   scope=Scope.settings,
                   help="Defines the maximum total grade of the block.")

    # Mandatory: must be set per instance.
    stack_user_name = String(
        scope=Scope.settings,
        help="The name of the training user in the stack.")
    stack_protocol = String(values=["ssh", "rdp", "vnc"],
                            default="ssh",
                            scope=Scope.settings,
                            help="What protocol to use for the connection. "
                            "Currently, \"ssh\", \"rdp\", or \"vnc\".")

    # Optional
    stack_template_path = String(
        scope=Scope.settings,
        help="The relative path to the uploaded orchestration template. "
        "For example, \"hot_lab.yaml\".")
    launch_timeout = Integer(
        default=None,
        scope=Scope.settings,
        help="Timeout for a launch operation, in seconds.  Takes precedence"
        "over the globally defined timeout.")
    suspend_timeout = Integer(
        default=None,
        scope=Scope.settings,
        help="Timeout for how long to wait before suspending a stack, after "
        "the last keepalive was received from the browser, in seconds. "
        "Takes precedence over the globally defined timeout.")
    hook_script = String(
        scope=Scope.settings,
        help="The relative path to an uploaded executable script. "
        "For example, \"hot_script.sh\".")
    # NOTE(review): declared String although it holds a number of seconds --
    # confirm whether downstream code expects a string here.
    delete_age = String(
        default=None,
        scope=Scope.settings,
        help="Delete stacks that haven't been resumed in this many seconds. "
        "Overrides the globally defined setting.")
    read_only = Boolean(default=False,
                        scope=Scope.settings,
                        help="Display the terminal window in read-only mode")
    hidden = Boolean(
        default=False,
        scope=Scope.settings,
        help="Hide the terminal window while running the background tasks. ")
    progress_check_label = String(
        default='Check Progress',
        scope=Scope.settings,
        help="Set the progress check button label. "
        "For example: \"Submit Answer\" or \"Check Progress\"(Default).")
    show_feedback = Boolean(
        default=True,
        scope=Scope.settings,
        help="On progress check, show feedback on how many tasks out of total "
        "are completed.")
    show_hints_on_error = Boolean(
        default=True,
        scope=Scope.settings,
        help="On progress check failure, display the tests' standard error "
        "streams as hints. When 'show_feedback' is set to False, hints "
        "will never be displayed and setting this to True will have no "
        "effect.")
    progress_check_result_heading = String(
        default='Progress check result',
        scope=Scope.settings,
        help="Message to display on progress check result window. This could "
        "be set to \"Answer Submitted\" for example, when choosing to "
        "not display hints and feedback. Default is \"Progress check "
        "result\".")

    # Set via XML
    hook_events = Dict(
        default=None,
        scope=Scope.settings,
        enforce_type=True,
        help="A dictionary of (string, boolean) pairs, where `string` is any "
        "of \"suspend\", \"resume\", and \"delete\".")
    ports = List(default=[],
                 scope=Scope.settings,
                 enforce_type=True,
                 help="What ports are available in the stack.")
    providers = List(default=[],
                     scope=Scope.settings,
                     enforce_type=True,
                     help="List of providers to launch the stack in.")
    tests = List(default=[],
                 scope=Scope.content,
                 enforce_type=True,
                 help="The list of tests to run.")

    # Deprecated in favor of "providers"
    provider = String(default="",
                      scope=Scope.settings,
                      help="Where to launch the stack. (DEPRECATED)")

    # User state, per instance.
    stack_run = String(default="",
                       scope=Scope.user_state,
                       help="The name of the run")
    stack_name = String(default="",
                        scope=Scope.user_state,
                        help="The name of the user's stack")
    stack_provider = String(
        default="",
        scope=Scope.user_state,
        help="The provider selected for the current launch of the stack")
    check_id = String(default="",
                      scope=Scope.user_state,
                      help="The check task id")
    check_timestamp = Integer(default=None,
                              scope=Scope.user_state,
                              help="When the check task was launched")
    check_status = Dict(default=None,
                        scope=Scope.user_state,
                        help="The check status")
    score = Dict(default=None,
                 scope=Scope.user_state,
                 help="Dictionary with the current student score")

    # Fields exposed in the Studio editing dialog.
    editable_fields = ('display_name', 'progress_check_label',
                       'progress_check_result_heading', 'show_feedback',
                       'show_hints_on_error', 'weight', 'stack_template_path',
                       'hook_script', 'hook_events', 'stack_user_name',
                       'stack_protocol', 'launch_timeout', 'suspend_timeout',
                       'delete_age', 'ports', 'providers', 'tests',
                       'read_only', 'hidden')

    has_author_view = True
    has_score = True
    has_children = True
    icon_class = 'problem'
    block_settings_key = SETTINGS_KEY

    def parse_attributes(tag, node, block):
        """
        Handle parsing tests, ports and providers.

        Mutates `block` in place: appends to `block.tests`, `block.ports`,
        or `block.providers` depending on `tag`.
        """
        if tag == "test":
            # Guard against empty elements: node.text is None for <test/>.
            text = node.text or ""

            # Fix up whitespace.
            if text[:1] == "\n":
                text = text[1:]
            # Bug fix: str.rstrip() returns a new string; the original call
            # discarded its result, leaving trailing whitespace in place.
            text = text.rstrip()
            text = textwrap.dedent(text)

            block.tests.append(text)

        elif tag == "port":
            # port must have values for 'name' and 'number'
            # raises KeyError for each if not defined
            port = {
                "name": node.attrib["name"],
                "number": int(node.attrib["number"])
            }
            block.ports.append(port)

        elif tag == "provider":
            # raises KeyError if 'name' is not defined
            # one must not add a provider without a name
            name = node.attrib["name"]
            capacity = node.attrib.get("capacity", None)
            if capacity in (None, "None", ""):
                # capacity should not be undefined
                # set to -1 (unlimited) in case it is
                capacity = -1
            else:
                # int() raises ValueError if the string literal
                # cannot be converted (the old comment said TypeError)
                capacity = int(capacity)
            provider = {"name": name, "capacity": capacity}
            # template and environment are not required
            # add to provider only when they have non empty values
            template = node.attrib.get("template", None)
            if template not in (None, "None"):
                provider["template"] = template
            environment = node.attrib.get("environment", None)
            if environment not in (None, "None"):
                provider["environment"] = environment
            block.providers.append(provider)

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        """
        Use `node` to construct a new block.

        Two layouts are supported: a pointer node with a `filename` attribute
        whose real content lives in a sibling XML file, or an inline node whose
        children carry the configuration directly.
        """
        block = runtime.construct_xblock_from_class(cls, keys)

        if 'filename' in node.attrib:
            # Read xml content from file.
            url_name = node.get('url_name', node.get('slug'))
            location = id_generator.create_definition(node.tag, url_name)

            filename = node.get('filename')
            pointer_path = "{category}/{url_path}".format(
                category='hastexo',
                url_path=location.block_id.replace(':', '/'))
            base = os.path.dirname(pointer_path)
            filepath = u"{base}/{name}.xml".format(base=base, name=filename)

            with runtime.resources_fs.open(filepath,
                                           encoding='utf-8') as infile:
                root = etree.fromstring(infile.read())
                for child in root:
                    # Skip XML comments entirely.
                    if child.tag is etree.Comment:
                        continue

                    elif child.tag in ['test', 'port', 'provider']:
                        cls.parse_attributes(child.tag, child, block)

                    elif child.tag == "hook_events":
                        # Each hook event defaults to enabled when omitted.
                        hook_events = {
                            "suspend":
                            bool(strtobool(child.attrib.get("suspend",
                                                            "true"))),
                            "resume":
                            bool(strtobool(child.attrib.get("resume",
                                                            "true"))),
                            "delete":
                            bool(strtobool(child.attrib.get("delete", "true")))
                        }
                        block.hook_events = hook_events

                    else:
                        logger.warning(
                            "Attribute unknown to Hastexo XBlock: {}".format(
                                child.tag))
            # Import nested blocks
            for child in node:
                block.runtime.add_node_as_child(block, child, id_generator)
        else:
            for child in node:
                if child.tag is etree.Comment:
                    continue

                qname = etree.QName(child)
                tag = qname.localname
                namespace = qname.namespace

                # <option:...> children map directly onto fields.
                if namespace == XML_NAMESPACES["option"]:
                    cls._set_field_if_present(block, tag, child.text,
                                              child.attrib)
                elif tag in ['test', 'port', 'provider']:
                    cls.parse_attributes(child.tag, child, block)
                else:
                    # Import nested blocks
                    block.runtime.add_node_as_child(block, child, id_generator)

        # Attributes become fields.
        for name, value in list(node.items()):  # lxml has no iteritems
            cls._set_field_if_present(block, name, value, {})

        return block

    def add_xml_to_node(self, node):
        """
        For exporting, set data on etree.Element `node`.

        The structured parts of the block (hook events, ports, providers,
        and test scripts) are serialized to a separate
        `<category>/<url_name>.xml` file on the export filesystem, while
        the scalar editable fields are written as attributes of `node`
        itself.  Nested child blocks are appended to `node` at the end.
        """

        # Write xml data to file
        # The url_name may contain ':' separators; map them to '/' so the
        # file lands in a subdirectory of the category folder.
        pathname = self.url_name.replace(':', '/')
        filepath = u'{category}/{pathname}.xml'.format(category=self.category,
                                                       pathname=pathname)

        self.runtime.export_fs.makedirs(os.path.dirname(filepath),
                                        recreate=True)

        with self.runtime.export_fs.open(filepath, 'wb') as filestream:
            root = etree.Element('hastexo')

            if self.hook_events:
                # Each hook flag defaults to True when missing from the
                # stored dict.
                hook_events_node = etree.SubElement(root, 'hook_events')
                hook_events_node.set(
                    'suspend', str(self.hook_events.get("suspend", True)))
                hook_events_node.set('resume',
                                     str(self.hook_events.get("resume", True)))
                hook_events_node.set('delete',
                                     str(self.hook_events.get("delete", True)))

            if self.ports:
                for port in self.ports:
                    # port must have values for 'name' and 'number',
                    # raises KeyError if not defined.
                    port_node = etree.SubElement(root, 'port')
                    port_node.set('name', port['name'])
                    port_node.set('number', str(port['number']))

            if self.providers:
                for provider in self.providers:
                    provider_node = etree.SubElement(root, 'provider')
                    # raises KeyError if 'name' is not defined
                    # one must not add a provider without a name
                    provider_node.set('name', provider['name'])
                    capacity = provider.get("capacity", None)
                    if capacity in (None, "None", ""):
                        # capacity should not be undefined
                        # set to -1 (unlimited) in case it is
                        capacity = -1
                    provider_node.set('capacity', str(capacity))
                    # Not having a 'template' or an 'environment' defined for
                    # a provider is a valid option.
                    # Only add to node when defined a non-empty value.
                    template = provider.get("template", None)
                    if template not in (None, "None"):
                        provider_node.set('template', template)
                    environment = provider.get("environment", None)
                    if environment not in (None, "None"):
                        provider_node.set('environment', environment)

            if self.tests:
                for test in self.tests:
                    # Test scripts are arbitrary text; CDATA keeps them
                    # intact in the serialized XML.
                    etree.SubElement(root, 'test').text = etree.CDATA(test)
            etree.ElementTree(root).write(filestream,
                                          pretty_print=True,
                                          encoding='utf-8')

        # Write out the xml file name
        filename = os.path.basename(pathname)

        # Add all editable fields as node attributes
        node.tag = self.category
        node.set("filename", filename)
        node.set('xblock-family', self.entry_point)
        node.set('display_name', self.display_name)
        node.set('progress_check_label', self.progress_check_label)
        node.set('show_hints_on_error', str(self.show_hints_on_error))
        node.set('show_feedback', str(self.show_feedback))
        node.set('progress_check_result_heading',
                 self.progress_check_result_heading)
        node.set('weight', str(self.weight))
        node.set('stack_user_name', self.stack_user_name)
        node.set('stack_protocol', self.stack_protocol)
        # Optional fields fall back to '' so the attribute is always
        # present in the exported node.
        node.set('stack_template_path', self.stack_template_path or '')
        node.set('launch_timeout', str(self.launch_timeout or ''))
        node.set('suspend_timeout', str(self.suspend_timeout or ''))
        node.set('hook_script', self.hook_script or '')
        node.set('delete_age', str(self.delete_age or ''))
        node.set('read_only', str(self.read_only))

        # Include nested blocks in course export
        if self.has_children:
            for child_id in self.children:
                child = self.runtime.get_block(child_id)
                self.runtime.add_block_as_child_node(child, node)

    @property
    def allowed_nested_blocks(self):
        """
        List the block types that may be nested inside this XBlock.

        HTML is always offered; Video, PDF and Markdown are included only
        when their packages can be imported in this environment.
        """
        specs = [NestedXBlockSpec(None, category="html", label=u"HTML")]

        try:
            from xmodule.video_module.video_module import VideoBlock
        except ImportError:
            logger.warning("Unable to import VideoBlock", exc_info=True)
        else:
            specs.append(
                NestedXBlockSpec(VideoBlock, category="video", label=u"Video"))

        try:
            from pdf import pdfXBlock
        except ImportError:
            logger.info("Unable to import pdfXblock", exc_info=True)
        else:
            specs.append(
                NestedXBlockSpec(pdfXBlock, category="pdf", label=u"PDF"))

        try:
            from markdown_xblock import MarkdownXBlock
        except ImportError:
            logger.info("Unable to import MarkdownXBlock", exc_info=True)
        else:
            specs.append(
                NestedXBlockSpec(MarkdownXBlock,
                                 category="markdown",
                                 label=u"Markdown"))

        return specs

    def is_correct(self):
        """
        Report whether the learner passed every progress-check test.

        True only when ``check_status`` is a non-empty dict with a truthy
        ``total`` whose ``pass`` count equals that total.
        """
        status = self.check_status
        if not isinstance(status, dict) or not status:
            return False
        total = status.get('total')
        if not total:
            return False
        return status.get('pass') == total

    def get_block_ids(self):
        """
        Return the ``(course_id, student_id)`` pair identifying this block.

        Falls back to ``'all'`` and the scope user id when no xmodule
        runtime (or anonymous student id) is available.
        """
        try:
            runtime = self.xmodule_runtime
            student_id = runtime.anonymous_student_id
        except AttributeError:
            return ('all', self.scope_ids.user_id)

        return (getattr(runtime, 'course_id', 'all'), student_id)

    def get_launch_timeout(self, settings):
        """
        Return the stack launch timeout in seconds.

        A truthy value set on the block wins; otherwise the value from
        the global settings dictionary is used.
        """
        return self.launch_timeout or settings.get("launch_timeout")

    def get_delete_age(self, settings):
        """
        Return 'delete_age' in seconds.
        XBlock attribute overrides the global setting.

        The block-level value is already expressed in seconds; the global
        setting is in days (default 14) and is converted here.
        """
        block_value = self.delete_age
        if block_value:
            return int(block_value)

        days = settings.get("delete_age", 14)
        return days * 86400

    def get_suspend_timeout(self):
        """
        Return 'suspend_timeout' in seconds.
        XBlock attribute overrides the global setting (default 120).
        """
        timeout = self.suspend_timeout
        if timeout:
            return int(timeout)

        return get_xblock_settings().get("suspend_timeout", 120)

    def get_stack_name(self):
        """
        Derive the unique stack name for this learner and course run.

        The name has the form "<course>_<run>_<student_id>", with every
        character that is not an ASCII letter or digit replaced by an
        underscore.
        """
        course_id, student_id = self.get_block_ids()
        raw_name = "%s_%s_%s" % (course_id.course, course_id.run, student_id)

        # Sanitize: only ASCII letters and digits survive; everything
        # else becomes '_'.
        return re.sub(r'[^0-9a-zA-Z]', '_', raw_name)

    def student_view(self, context=None):
        """
        The primary view of the HastexoXBlock, shown to students when viewing
        courses.

        Renders the nested children plus the main terminal template,
        attaches the public CSS/JS assets (including the configured
        Guacamole client bundle), ensures a Stack record exists for this
        learner, and passes the runtime configuration to the client-side
        JS initializer.
        """
        # Load configuration
        settings = get_xblock_settings()

        # Get the course id and anonymous user id, and derive the stack name
        # from them
        course_id, student_id = self.get_block_ids()
        self.stack_run = "%s_%s" % (course_id.course, course_id.run)
        self.stack_name = self.get_stack_name()

        frag = Fragment()

        # Render children and concatenate their content so the main
        # template can embed it.
        child_content = ""
        for child_id in self.children:
            child = self.runtime.get_block(child_id)
            child_fragment = child.render("student_view", context)
            frag.add_frag_resources(child_fragment)
            child_content += child_fragment.content

        # Render the main template
        frag.add_content(
            loader.render_django_template("static/html/main.html",
                                          {"child_content": child_content}))

        # Add the public CSS and JS
        frag.add_css_url(
            self.runtime.local_resource_url(self, 'public/css/main.css'))
        frag.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/plugins.js'))
        frag.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/main.js'))
        # The Guacamole client bundle version is configurable so a
        # deployment can match its server version.
        guac_js_version = settings.get("guacamole_js_version", "1.4.0")
        frag.add_javascript_url(
            self.runtime.local_resource_url(
                self,
                f'public/js/guacamole-common-js/{guac_js_version}-all.min.js'))

        # Create the stack in the database
        stack = self.create_stack(settings, course_id, student_id)

        # Call the JS initialization function.
        # Note: a hidden block is always presented read-only.
        frag.initialize_js(
            'HastexoXBlock', {
                "terminal_url": settings.get("terminal_url"),
                "timeouts": settings.get("js_timeouts"),
                "has_tests": len(self.tests) > 0,
                "protocol": self.stack_protocol,
                "ports": self.ports,
                "port": stack.port,
                "instructions_layout": settings.get("instructions_layout"),
                "read_only": self.read_only or self.hidden,
                "hidden": self.hidden,
                "progress_check_label": self.progress_check_label,
                "show_hints_on_error": self.show_hints_on_error,
                "show_feedback": self.show_feedback,
                "progress_check_result_heading":
                self.progress_check_result_heading
            })

        return frag

    @transaction.atomic
    def create_stack(self, settings, course_id, student_id):
        """
        Get or create the learner's Stack record and refresh its settings.

        Resolves the preferred port and the provider list -- block-level
        providers, the legacy single `provider` field, or the globally
        configured providers, in that order -- then persists them.
        Raises Exception when no provider or stack template can be
        resolved.
        """
        # use the 'student_id' to link the stack to user
        learner = AnonymousUserId.objects.get(
            anonymous_user_id=student_id).user

        # Row-level lock: this runs in a transaction and must not race
        # with other writers of the same stack row.
        stack, _ = Stack.objects.select_for_update().get_or_create(
            student_id=student_id,
            course_id=course_id,
            name=self.stack_name,
            learner=learner)

        # Set the port.  Keep the previously chosen port if it is still
        # among the configured ones, otherwise fall back to the first.
        port = None
        if len(self.ports) > 0:
            ports = [p["number"] for p in self.ports]
            port = stack.port
            if not port or port not in ports:
                port = self.ports[0]["number"]

        # Set the providers
        providers = []
        if len(self.providers):
            for provider in self.providers:
                p = dict(provider)

                # A provider without its own template inherits the
                # block-level stack template.
                if not p.get("template"):
                    p["template"] = self.stack_template_path

                if not p["template"]:
                    raise Exception("Provider [%s] template file not "
                                    "provided for [%s]." %
                                    (p["name"], self.stack_name))
                providers.append(p)
        elif self.provider:
            # For backward compatibility
            if not self.stack_template_path:
                raise Exception("Provider [%s] template file not provided "
                                "for [%s]." % (self.provider, self.stack_name))

            providers.append({
                "name": self.provider,
                "capacity": -1,
                "template": self.stack_template_path,
                "environment": None
            })
        else:
            # No providers have been configured.  Use the "default" one if
            # it exists, or the first one if not.
            configured_providers = settings.get("providers", {})
            provider_name = None
            if configured_providers.get("default"):
                provider_name = "default"
            else:
                try:
                    provider_name = next(iter(configured_providers))
                except StopIteration:
                    pass

            if not provider_name:
                raise Exception("Provider not configured for [%s]." %
                                self.stack_name)
            elif not self.stack_template_path:
                raise Exception("Provider [%s] template file not "
                                "provided for [%s]." %
                                (provider_name, self.stack_name))
            else:
                providers.append({
                    "name": provider_name,
                    "capacity": -1,
                    "template": self.stack_template_path,
                    "environment": None
                })

        # Save only the fields this method is responsible for.
        stack.user = self.stack_user_name
        stack.run = self.stack_run
        stack.hook_script = self.hook_script
        stack.hook_events = self.hook_events
        stack.protocol = self.stack_protocol
        stack.port = port
        stack.providers = providers
        stack.delete_age = self.get_delete_age(settings)

        stack.save(update_fields=[
            "user", "run", "hook_script", "hook_events", "protocol", "port",
            "providers", "delete_age"
        ])

        return stack

    def update_stack(self, data):
        """
        Persist ``data`` onto this block's stack record.

        Must be invoked inside a database transaction.  The stack name is
        recomputed when empty, which can happen after a learner's state
        has been deleted.
        """
        if not self.stack_name:
            self.stack_name = self.get_stack_name()

        course_id, student_id = self.get_block_ids()
        update_stack(self.stack_name, course_id, student_id, data)

    def get_stack(self, prop=None):
        """
        Fetch this block's stack record, or one property of it when
        ``prop`` is given.
        """
        block_course, block_student = self.get_block_ids()
        return get_stack(self.stack_name, block_course, block_student, prop)

    def launch_stack_task(self, settings, kwargs):
        """
        Fire the asynchronous stack launch task.

        The soft time limit comes from the configured launch timeout; the
        hard limit adds a 30-second grace period, and the task expires if
        it has not started within the soft limit.
        """
        soft_limit = self.get_launch_timeout(settings)
        hard_limit = soft_limit + 30

        return LaunchStackTask.apply_async(
            kwargs=kwargs,
            expires=soft_limit,
            soft_time_limit=soft_limit,
            time_limit=hard_limit,
        )

    def launch_stack_task_result(self, task_id):
        """Return the Celery AsyncResult handle for a launch task id."""
        return LaunchStackTask.AsyncResult(task_id)

    @XBlock.json_handler
    @transaction.atomic
    def get_user_stack_status(self, request_data, suffix=''):
        """
        Update the user stack status and return it.

        Drives the stack state machine from the client's polling loop:

        * down / unknown ............ launch the stack
        * up ........................ reset it when requested
        * launch pending ............ verify the Celery task and its
                                      timeout, relaunching or failing it
        * other pending states ...... report and wait for a manual retry
        * failed .................... relaunch on initialize/reset, else
                                      report the error

        Always pushes the suspend deadline forward and saves the row.
        Runs in a transaction with the stack row locked.
        """
        settings = get_xblock_settings()
        course_id, student_id = self.get_block_ids()
        initialize = request_data.get("initialize", False)
        reset = request_data.get("reset", False)

        stack = Stack.objects.select_for_update().get(student_id=student_id,
                                                      course_id=course_id,
                                                      name=self.stack_name)

        def _launch_stack(reset=False):
            # Fire the async launch task for this stack.
            result = self.launch_stack_task(
                settings, {
                    "stack_id": stack.id,
                    "reset": reset,
                    "learner_id": stack.learner.id
                })

            # Record the pending launch on the stack row.
            stack.status = LAUNCH_PENDING
            stack.error_msg = ""
            stack.launch_task_id = result.id
            stack.launch_timestamp = timezone.now()

            logger.info("Fired async launch task [%s] for [%s]" %
                        (result.id, self.stack_name))

            return result

        def _check_result(result):
            # A finished-but-unsuccessful task is surfaced as LaunchError.
            if result and result.ready() and not result.successful():
                raise LaunchError(repr(result.result))

        if stack.status in DOWN_STATES or not stack.status:
            # Launch the stack if there's no known status, or if it's known to
            # be down.
            logger.info("Launching stack [%s] with previous status [%s]." %
                        (self.stack_name, stack.status))
            try:
                _check_result(_launch_stack(reset))
            except LaunchError as e:
                stack.status = LAUNCH_ERROR
                stack.error_msg = e.error_msg
        elif stack.status in UP_STATES:
            # The stack is up.  Reset it, if requested.
            if reset:
                logger.info("Resetting successfully launched stack [%s]." %
                            (self.stack_name))
                try:
                    _check_result(_launch_stack(reset))
                except LaunchError as e:
                    stack.status = LAUNCH_ERROR
                    stack.error_msg = e.error_msg

            else:
                logger.info("Successful launch detected for [%s], "
                            "with status [%s]" %
                            (self.stack_name, stack.status))
        elif stack.status == LAUNCH_PENDING:
            # The stack is pending launch.
            try:
                # Check if the Celery task hasn't blown up.
                task_id = stack.launch_task_id
                _check_result(self.launch_stack_task_result(task_id))
            except LaunchError as e:
                stack.status = LAUNCH_ERROR
                stack.error_msg = e.error_msg
            else:
                # Calculate time since launch
                time_since_launch = 0
                launch_timestamp = stack.launch_timestamp
                if launch_timestamp:
                    time_since_launch = (timezone.now() -
                                         launch_timestamp).seconds
                launch_timeout = self.get_launch_timeout(settings)

                # Check if the pending task hasn't timed out.
                if time_since_launch <= launch_timeout:
                    # The pending task still has some time to finish.
                    # Please wait.
                    logger.debug("Launch pending for [%s]" % (self.stack_name))

                elif initialize or reset:
                    # Timeout reached, but the user just entered the page or
                    # requested a reset.  Try launching the stack again.
                    if initialize:
                        logger.info("Launch timeout detected on initialize. "
                                    "Launching stack [%s]" % (self.stack_name))
                    else:
                        logger.info("Launch timeout detected on reset. "
                                    "Resetting stack [%s]" % (self.stack_name))
                    try:
                        _check_result(_launch_stack(reset))
                    except LaunchError as e:
                        stack.status = LAUNCH_ERROR
                        stack.error_msg = e.error_msg
                else:
                    # Timeout reached.  Consider the task a failure and let the
                    # user retry manually.
                    logger.error("Launch timeout reached for [%s] "
                                 "after %s seconds" %
                                 (self.stack_name, time_since_launch))
                    stack.status = LAUNCH_ERROR
                    stack.error_msg = "Timeout when launching stack."
        elif stack.status in PENDING_STATES:
            # The stack is otherwise pending.  Report and let the user retry
            # manually.
            logger.error("Detected pending stack [%s], "
                         "with status [%s]" % (self.stack_name, stack.status))
        elif initialize or reset:
            # Detected an unforeseen state, but the user just entered the page,
            # or requested a retry or reset, so start from scratch.
            if reset:
                logger.info("Resetting failed stack [%s]." % (self.stack_name))
            else:
                logger.info("Retrying previously failed stack [%s]." %
                            (self.stack_name))
            try:
                _check_result(_launch_stack(reset))
            except LaunchError as e:
                stack.status = LAUNCH_ERROR
                stack.error_msg = e.error_msg
        else:
            # Detected a failed stack.  Report the error and let the user retry
            # manually.
            logger.error("Failed stack [%s] detected with status [%s]." %
                         (self.stack_name, stack.status))

        # Reset the dead man's switch
        stack.suspend_timestamp = timezone.now()
        stack.suspend_by = timezone.now() + timezone.timedelta(
            seconds=self.get_suspend_timeout())

        # Save changes to the database
        stack.save()

        return {
            "status": stack.status,
            "error_msg": stack.error_msg,
            "name": stack.name
        }

    @XBlock.json_handler
    @transaction.atomic
    def keepalive(self, data, suffix=''):
        """
        Reset the dead man's switch.

        Pushes the stack's suspension deadline forward by the configured
        suspend timeout, counted from now.
        """
        suspend_delta = timezone.timedelta(seconds=self.get_suspend_timeout())
        self.update_stack({
            "suspend_timestamp": timezone.now(),
            "suspend_by": timezone.now() + suspend_delta,
        })

    @XBlock.json_handler
    @transaction.atomic
    def set_port(self, data, suffix=''):
        """
        Set the preferred stack port.

        Stores the learner-selected port number on the stack record.
        """
        requested_port = int(data.get("port"))
        self.update_stack({"port": requested_port})

    def check_progress_task(self, soft_time_limit, **kwargs):
        """
        Fire the asynchronous student-progress check task.

        The hard time limit is the soft limit plus a 30-second grace
        period; the task expires if not started within the soft limit.
        """
        return CheckStudentProgressTask.apply_async(
            kwargs=kwargs,
            expires=soft_time_limit,
            soft_time_limit=soft_time_limit,
            time_limit=soft_time_limit + 30)

    def check_progress_task_result(self, check_id):
        """Return the Celery AsyncResult handle for a progress-check id."""
        return CheckStudentProgressTask.AsyncResult(check_id)

    @XBlock.json_handler
    def get_check_status(self, data, suffix=''):
        """
        Checks the current student score.

        If a progress-check task is already running, report its state
        (failing it once `check_timeout` seconds have elapsed);
        otherwise, fire a new check task.  A successful result is stored
        as the score and published as a grade.
        """
        settings = get_xblock_settings()
        check_timeout = settings.get("check_timeout")

        def _launch_check():
            # Fire a new async check of the configured tests against the
            # learner's stack.
            stack = self.get_stack()
            logger.info('Executing tests for stack [%s], IP [%s], user [%s]:' %
                        (self.stack_name, stack.ip, self.stack_user_name))
            for test in self.tests:
                logger.info('Test: %s' % test)

            kwargs = {
                "tests": self.tests,
                "stack_ip": stack.ip,
                "stack_user_name": self.stack_user_name,
                "stack_key": stack.key
            }
            result = self.check_progress_task(check_timeout, **kwargs)

            # Save task ID and timestamp
            self.check_id = result.id
            self.check_timestamp = int(time.time())

            return result

        def _process_result(result):
            # Translate a Celery result into the status dict returned to
            # the client, updating score state on success.
            if result.ready():
                # Clear the task ID so we know there is no task running.
                self.check_id = ""

                if (result.successful() and isinstance(result.result, dict)
                        and not result.result.get('error')):
                    status = result.result
                    score = Score(raw_earned=status['pass'],
                                  raw_possible=status['total'])
                    self.set_score(score)
                    # A publish event is necessary for calculating grades
                    self.publish_grade()

                else:
                    status = {
                        'status': 'ERROR',
                        'error_msg': 'Unexpected result: %s' %
                        repr(result.result)  # noqa: E501
                    }
            else:
                status = {'status': 'CHECK_PROGRESS_PENDING'}

            # Store the result
            self.check_status = status

            return status

        # If a check task is running, return its status.
        if self.check_id:
            logger.info('Check progress task is running: %s' % self.check_id)
            result = self.check_progress_task_result(self.check_id)
            status = _process_result(result)

            if status['status'] == 'CHECK_PROGRESS_PENDING':
                time_since_check = int(time.time()) - self.check_timestamp

                # Check if the pending task hasn't timed out.
                if time_since_check >= check_timeout:
                    # Timeout reached.  Consider the task a failure and let the
                    # user retry manually.
                    logger.error('Check timeout reached for [%s] '
                                 'after %s seconds' %
                                 (self.stack_name, time_since_check))
                    self.check_id = ""
                    status = {
                        'status': 'ERROR',
                        'error_msg': "Timeout when checking progress."
                    }

        # Otherwise, launch the check task.
        else:
            result = _launch_check()
            status = _process_result(result)

        return status

    def max_score(self):
        """
        Return the maximum possible score for this XBlock.

        Equals the block's configured weight.
        """
        return self.weight

    def has_submitted_answer(self):
        """
        Returns True if the problem has been answered by the runtime user.

        True exactly when a score has been persisted via set_score().
        """
        return self.score is not None

    def set_score(self, score):
        """
        Persist a score to the XBlock.

        The score is a named tuple with a raw_earned attribute and a
        raw_possible attribute, reflecting the raw earned score and the
        maximum raw score the student could have earned respectively.

        Arguments:
            score: Score(raw_earned=float, raw_possible=float)

        Returns:
            None
        """
        earned = score.raw_earned
        possible = score.raw_possible
        self.score = {'raw_earned': earned, 'raw_possible': possible}

    def get_score(self):
        """
        Return a raw score already persisted on the XBlock.  Should not
        perform new calculations.

        Returns:
            Score(raw_earned=float, raw_possible=float), or None when
            nothing has been stored yet (a warning is logged then).
        """
        stored = self.score
        if not stored:
            logger.warning("No score is earned for this block yet")
            return None

        return Score(raw_earned=stored.get('raw_earned'),
                     raw_possible=stored.get('raw_possible'))

    def calculate_score(self):
        """
        Calculate a new raw score based on the state of the problem.
        This method should not modify the state of the XBlock.

        Nothing is recomputed here: the runtime calls this only when
        has_submitted_answer() is True, so the stored score is returned.

        Returns:
            Score(raw_earned=float, raw_possible=float)
        """
        return self.get_score()

    def publish_grade(self):
        """
        Publish a grade to the runtime.

        Fetches the persisted score and forwards it through the
        _publish_grade hook so the LMS can recalculate grades.
        """
        self._publish_grade(score=self.get_score())

    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("HastexoXBlock", """<vertical_demo>
                <hastexo/>
                </vertical_demo>
             """),
        ]
コード例 #17
0
class EproctoringXBlock(XBlock):
    """
    The main function being called every time page gets loaded or refreshed.
    """

    # Fields are defined on the class; access them in code as
    # self.<fieldname>.  All XBlock fields below are Scope.user_state,
    # i.e. stored per learner.

    # Filesystem root (set by the admin) under which evidence of
    # suspicious attempts is saved.  NOTE(review): this is a plain class
    # attribute shared by all instances, not an XBlock field.
    path = "/home/edx/suspicious_images"

    # Name of the learner being proctored.
    student_name = String(
        default="",
        scope=Scope.user_state,
        help="Name of user",
    )

    # Simple counter used as a test value to show activity.
    count = Integer(
        default=0,
        scope=Scope.user_state,
        help="A simple counter, to show something happening",
    )

    # Accumulated time (per the help text, since the last web-cheating
    # attempt); reset to 0.0 on every student_view render.
    sum_time = Float(
        default=0.0,
        scope=Scope.user_state,
        help="A counter storing time between last web-cheating attempt",
    )

    # Per-learner timing data; exact shape not visible here -- TODO
    # confirm against the client-side JS.
    time_dict = Dict(default={}, scope=Scope.user_state)

    # String form of the proctored content's block id, set in
    # student_view from the request for instructor/staff users.
    content_id = String(default="", scope=Scope.user_state)

    # Human-readable course name.
    course_name = String(default="", scope=Scope.user_state)

    def get_all_courses(self):
        """
        Return 1 if the subsection identified by ``self.content_id`` is
        an active proctored exam, 0 otherwise.

        Called for "instructor" and "staff" users.  Looks the flag up in
        the ``proctoring_proctoredexam`` table of the ``edxapp`` MySQL
        database.  Any database failure is treated as "not active".
        """
        sql_user = '******'
        database = 'edxapp'
        sql_passwd = ""

        try:
            db_mysql = MySQLdb.connect(
                user=sql_user, passwd=sql_passwd,
                db=database)  # Establishing MySQL connection
        except MySQLdb.Error:
            # Fix: the original used a Python-2 print statement and then
            # fell through to use an unbound `db_mysql` (NameError).
            # Without a connection we cannot tell; report inactive.
            print("MySQL connection not established")
            return 0

        query = ("select is_active from proctoring_proctoredexam"
                 " where content_id = %s")

        try:
            mysql_cursor = db_mysql.cursor()
            # Parameterized query: content_id is bound, not interpolated.
            mysql_cursor.execute(query, (self.content_id, ))
            courses = mysql_cursor.fetchall()
            # No matching row, or a falsy is_active flag, both mean the
            # exam is not active.
            return 1 if courses and courses[0][0] else 0
        except (IndexError, MySQLdb.Error):
            return 0
        finally:
            # Fix: the original leaked the connection.
            db_mysql.close()
        # Note: an unreachable directory-building fragment that followed
        # the return in the original (referencing undefined names such as
        # `course_name` and `block_id`) was dead code and has been
        # removed; the equivalent logic lives in get_path().

    def get_path_calling(self):
        """
        Build the destination file name for the next webcam snapshot.

        Ensures the learner's ``web_img`` directory exists, then returns
        a path of the form ``<dir>/<index>_web.png`` where the index is
        the number of files already captured there.
        """
        base_dir = self.get_path(
            self.path, self.xmodule_runtime.xqueue['default_queuename'],
            self.location.block_id,
            database.get_user(self.xmodule_runtime.user_id))
        webimg_dir = base_dir + "web_img"
        if not os.path.exists(webimg_dir):
            os.makedirs(webimg_dir)

        webimg_dir += "/"
        next_index = len(os.listdir(webimg_dir))
        return webimg_dir + str(next_index) + "_web.png"

    def resource_string(self, path):
        """Handy helper for getting resources from our kit.

        Loads a package resource and decodes it as UTF-8 text.
        """
        raw_bytes = pkg_resources.resource_string(__name__, path)
        return raw_bytes.decode("utf8")

    def is_instructor(self):
        """Return True when the current user has the 'instructor' role."""
        role = self.xmodule_runtime.get_user_role()
        return role == 'instructor'

    def studio_view(self, request, context=None):
        """
        The Studio (authoring) view of the EproctoringXBlock.

        Renders the second template with this block interpolated into it
        and wires up the shared JS initializer.
        """
        template = self.resource_string("templates/eproctoring_xblock2.html")
        fragment = Fragment(template.format(self=self))
        stylesheet = self.resource_string(
            "static/css/eproctoring_xblock2.css")
        fragment.add_css(stylesheet)
        fragment.initialize_js('EproctoringXBlock')
        return fragment

    def student_view(self, request, context=None):
        """
        The primary view of the EproctoringXBlock.

        Students get the proctoring capture page; instructors and staff
        get an admin page reporting whether the enclosing subsection is
        an active proctored exam.
        """

        # Reset the accumulated timer for this render.
        self.sum_time = 0.0

        if self.xmodule_runtime.get_user_role() == 'student':

            # This block should run only if the current module is timed
            context = {
                "student_name":
                database.get_user(self.xmodule_runtime.user_id),
            }

            # NOTE(review): `html` is loaded but unused; the content is
            # produced by render_template below.
            html = self.resource_string("templates/eproctoring_xblock.html")
            frag = Fragment()
            frag.add_content(
                render_template("templates/eproctoring_xblock.html", context))
            frag.add_css(
                self.resource_string("static/css/eproctoring_xblock.css"))
            frag.add_javascript(
                self.resource_string("static/js/src/eproctoring_xblock.js"))
            frag.initialize_js('EproctoringXBlock')
            return frag

        else:
            if self.xmodule_runtime.get_user_role() == 'instructor':
                # If the user is an instructor, find out whether the
                # subsection is timed.  "reload_page" is irrelevant on the
                # first alert (the request data does not exist until the
                # page is reloaded); on the second alert no further
                # reload is needed.

                if (request['root_xblock']):
                    self.content_id = str(request['root_xblock'].parent)
                    is_active_bool = self.get_all_courses()
                    reload_page = 0
                else:
                    reload_page = 1
                    is_active_bool = 0

            # Analogous to request['root_xblock'].parent for instructor,
            # staff users carry the id in request['activate_block_id'].
            if self.xmodule_runtime.get_user_role() == 'staff':
                self.content_id = str(request['activate_block_id'])
                is_active_bool = self.get_all_courses()
                reload_page = 0

            # Passing the boolean variables as context to
            # eproctoring_xblock2.html
            context = {
                "is_active_bool": is_active_bool,
                "reload_page": reload_page,
            }

            # NOTE(review): `html` is unused here as well.
            html = self.resource_string("templates/eproctoring_xblock2.html")
            frag = Fragment()
            frag.add_content(
                render_template("templates/eproctoring_xblock2.html", context))
            frag.add_css(
                self.resource_string("static/css/eproctoring_xblock2.css"))
            frag.add_javascript(
                self.resource_string("static/js/src/eproctoring_xblock2.js"))
            frag.initialize_js('EproctoringXBlock')
            return frag

    @XBlock.json_handler
    def send_img(self, image_string, suffix=''):
        """
        Persist a captured-image payload to the user's proctoring directory
        and kick off the analysis subprocess.

        Arguments:
            image_string: raw payload posted by the frontend.
            suffix: unused, required by the json_handler signature.
        """
        target_dir = self.get_path(
            self.path,
            self.xmodule_runtime.xqueue['default_queuename'],
            self.location.block_id,
            database.get_user(self.xmodule_runtime.user_id))

        # 'with' guarantees the handle is closed even if the write fails;
        # the original leaked the handle on error and shadowed builtin 'file'.
        with open(target_dir + "json_data.txt", "w") as payload_file:
            payload_file.write(image_string)

        sub_analyse.sub_process(
            self.path, database.get_user(self.xmodule_runtime.user_id),
            self.xmodule_runtime.xqueue['default_queuename'],
            self.location.block_id)

    @XBlock.json_handler
    def reset_handler(self, data, suffix=''):
        """Reset the proctoring results table in the backing database."""
        database.reset_table()

    @XBlock.json_handler
    def update_handler(self, data, suffix=''):
        """Recompute the aggregate totals stored in the database."""
        database.update_total()

    @XBlock.json_handler
    def save_student_handler(self, data, suffix=''):
        """
        Write the final per-student results for this subsection to a text
        file named after the parent usage id.

        The file name is derived from the parent usage key: everything after
        the first ':', keeping only the first two '+'-separated segments
        joined by '-' (e.g. 'block-v1:OrgX+Course1+2020+...' -> 'OrgX-Course1').
        """
        parent_string = str(self.parent)

        # Take the part after the first ':' (the whole string when there is
        # no colon, matching find() returning -1), then join the first two
        # '+'-separated segments with '-'. This replaces the original
        # character-by-character loop with identical results.
        tail = parent_string[parent_string.find(':') + 1:]
        column_dir_name = '-'.join(tail.split('+')[:2])

        desc_array = database.final_result(column_dir_name)

        # 'with' closes the file even if a write raises; the original leaked
        # the handle in that case and shadowed builtin 'file'.
        with open(self.path + "/" + column_dir_name + ".txt", "w") as out:
            for student_name in desc_array:
                out.write(student_name + "\n")

    @XBlock.json_handler
    def compare_image(self, data, suffix=''):
        """
        Compare the newly captured image with the learner's profile image.

        The raw payload is written to a scratch file, then an external
        script (compare2.py) performs the comparison as a subprocess.

        Returns:
            dict: {"count": <updated capture counter>}
        """
        path_compareimg1 = self.get_path(
            self.path, self.xmodule_runtime.xqueue['default_queuename'],
            self.location.block_id,
            database.get_user(self.xmodule_runtime.user_id)) + "compare_img"

        if not os.path.exists(path_compareimg1):
            os.makedirs(path_compareimg1)

        path_compareimg1 += "/"

        # Next free sequence number for the captured frame.
        path_compareimg2 = path_compareimg1 + str(
            len(os.listdir(path_compareimg1))) + "_compare.png"

        path_compareimg1 += "profile_pic"

        # NOTE(review): the scratch location is hard-coded to the py2.7 venv;
        # consider making it configurable.
        # 'with' guarantees the handle is closed even if the write fails.
        with open(
                "/edx/app/edxapp/venvs/proctor/lib/python2.7/site-packages/compare_image/"
                + "json_data1.txt", "w") as scratch:
            scratch.write(data)

        image2path = profile_pic.get_pic_path(
            database.get_user(self.xmodule_runtime.user_id))

        command = [
            'python',
            '/edx/app/edxapp/venvs/proctor/lib/python2.7/site-packages/compare2.py',
            str(image2path),
            str(path_compareimg1),
            str(path_compareimg2),
            str(self.xmodule_runtime.user_id),
            str(self.xmodule_runtime.xqueue['default_queuename'])
        ]
        # Output is not used (the original bound it to an unused variable);
        # check_output still raises CalledProcessError on non-zero exit.
        subprocess.check_output(command)

        self.count += 1
        return {"count": self.count}

    @XBlock.json_handler
    def startingtime(self, data, suffix=''):
        """Record the wall-clock time at which this timer was started."""
        # Copy-out / write-back so the XBlock field sees a reassignment.
        timer_state = self.time_dict
        timer_state["start_time"] = time.time()
        self.time_dict = timer_state

    @XBlock.json_handler
    def differenceoftimes(self, data, suffix=''):
        """Compute elapsed time since start and persist it as 'web_count'."""
        elapsed = time.time() - self.time_dict["start_time"]
        self.sum_time = elapsed
        database.update_table(self.xmodule_runtime.user_id,
                              self.xmodule_runtime.xqueue['default_queuename'],
                              "web_count", self.sum_time)

    # TO-DO: change this handler to perform your own actions.  You may need more
    # than one handler, or you may not need any handlers at all.
    @XBlock.json_handler
    def increment_count(self, data, suffix=''):
        """
        Example handler: bump the click counter and echo it back.
        """
        # Sanity-check the demo payload posted by the frontend.
        assert data['hello'] == 'world'

        self.count = self.count + 1
        return {"count": self.count}

    @XBlock.json_handler
    def web_screen_shot(self, data, suffix=''):
        """
        Decode a screenshot posted by the browser and save it to disk.

        The first 22 characters of the payload are skipped — presumably the
        'data:image/png;base64,' data-URL prefix (TODO confirm against the
        frontend JS).
        """
        image = base64.b64decode(data[22:])

        file_name = self.get_path_calling()

        # Use a distinct name for the handle instead of shadowing the path
        # variable (the original rebound 'file_name' to the file object).
        with open(file_name, 'wb') as image_file:
            image_file.write(image)

    # TO-DO: change this to create the scenarios you'd like to see in the
    # worbench while developing your XBlock.
    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""

        return [
            ("EproctoringXBlock", """<eproctoring_xblock/>
             """),
            ("Multiple EproctoringXBlock", """<vertical_demo>
                <eproctoring_xblock/>
                <eproctoring_xblock/>
                <eproctoring_xblock/>
                </vertical_demo>
             """),
        ]
コード例 #18
0
ファイル: mentoring.py プロジェクト: gibacache/xblock
class BaseMentoringBlock(
        XBlock,
        XBlockWithTranslationServiceMixin,
        XBlockWithSettingsMixin,
        StudioEditableXBlockMixin,
        ThemableXBlockMixin,
        MessageParentMixin,
):
    """
    An XBlock that defines functionality shared by mentoring blocks.
    """
    # Content
    show_title = Boolean(display_name=_("Show title"),
                         help=_("Display the title?"),
                         default=True,
                         scope=Scope.content)
    # 0 (the default) means unlimited attempts.
    max_attempts = Integer(
        display_name=_("Max. attempts allowed"),
        help=
        _("Maximum number of times students are allowed to attempt the questions belonging to this block"
          ),
        default=0,
        scope=Scope.content,
        enforce_type=True)
    weight = Float(display_name=_("Weight"),
                   help=_("Defines the maximum total grade of the block."),
                   default=1,
                   scope=Scope.settings,
                   enforce_type=True)

    # User state
    num_attempts = Integer(
        # Number of attempts a user has answered for this questions
        default=0,
        scope=Scope.user_state,
        enforce_type=True)

    has_children = True
    has_score = True  # The Problem/Step Builder XBlocks produce scores. (Their children do not send scores to the LMS.)

    icon_class = 'problem'
    block_settings_key = 'mentoring'
    options_key = 'options'

    # Theme used when no per-instance theme is configured via settings.
    default_theme_config = {
        'package': 'problem_builder',
        'locations': ['public/themes/lms.css']
    }

    @property
    def url_name(self):
        """
        Get the url_name for this block. In Studio/LMS it is provided by a mixin, so we just
        defer to super(). In the workbench or any other platform, we use the usage_id.
        """
        try:
            return super(BaseMentoringBlock, self).url_name
        except AttributeError:
            return unicode(self.scope_ids.usage_id)

    @property
    def review_tips_json(self):
        """The block's review tips serialized to JSON for the client."""
        return json.dumps(self.review_tips)

    @property
    def max_attempts_reached(self):
        """True once the learner used all allowed attempts (0 = unlimited)."""
        return self.max_attempts > 0 and self.num_attempts >= self.max_attempts

    def get_content_titles(self):
        """
        By default, each Sequential block in a course ("Subsection" in Studio parlance) will
        display the display_name of each descendant in a tooltip above the content. We don't
        want that - we only want to display one title for this mentoring block as a whole.
        Otherwise things like "Choice (yes) (Correct)" will appear in the tooltip.

        If this block has no title set, don't display any title. Then, if this is the only block
        in the unit, the unit's title will be used. (Why isn't it always just used?)
        """
        has_explicitly_set_title = self.fields['display_name'].is_set_on(self)
        if has_explicitly_set_title:
            return [self.display_name]
        return []

    def get_options(self):
        """
        Get options settings for this block from settings service.

        Fall back on default options if xblock settings have not been customized at all
        or no customizations for options available.
        """
        xblock_settings = self.get_xblock_settings(default={})
        if xblock_settings and self.options_key in xblock_settings:
            return xblock_settings[self.options_key]
        return _default_options_config

    def get_option(self, option):
        """
        Get value of a specific instance-wide `option`.
        """
        return self.get_options().get(option)

    @XBlock.json_handler
    def view(self, data, suffix=''):
        """
        Current HTML view of the XBlock, for refresh by client
        """
        frag = self.student_view({})
        return {'html': frag.content}

    @XBlock.json_handler
    def publish_event(self, data, suffix=''):
        """
        Publish data for analytics purposes
        """
        event_type = data.pop('event_type')
        if (event_type == 'grade'):
            # This handler can be called from the browser. Don't allow the browser to submit arbitrary grades ;-)
            raise JsonHandlerError(
                403, "Posting grade events from the browser is forbidden.")

        self.runtime.publish(self, event_type, data)
        return {'result': 'ok'}

    def author_preview_view(self, context):
        """
        Child blocks can override this to add a custom preview shown to
        authors in Studio when not editing this block's children.
        """
        fragment = self.student_view(context)
        fragment.add_content(
            loader.render_template('templates/html/mentoring_url_name.html',
                                   {"url_name": self.url_name}))
        fragment.add_css_url(
            self.runtime.local_resource_url(
                self, 'public/css/problem-builder-edit.css'))
        self.include_theme_files(fragment)
        return fragment

    def max_score(self):
        """ Maximum score. We scale all scores to a maximum of 1.0 so this is always 1.0 """
        return 1.0
コード例 #19
0
class DragAndDropBlock(ScorableXBlockMixin, XBlock, XBlockWithSettingsMixin,
                       ThemableXBlockMixin):
    """
    XBlock that implements a friendly Drag-and-Drop problem
    """

    CATEGORY = "drag-and-drop-v2"

    # Labels for the three possible grading outcomes of a submission.
    SOLUTION_CORRECT = "correct"
    SOLUTION_PARTIAL = "partial"
    SOLUTION_INCORRECT = "incorrect"

    # CSS classes applied to grade-level feedback messages.
    GRADE_FEEDBACK_CLASSES = {
        SOLUTION_CORRECT: FeedbackMessages.MessageClasses.CORRECT_SOLUTION,
        SOLUTION_PARTIAL: FeedbackMessages.MessageClasses.PARTIAL_SOLUTION,
        SOLUTION_INCORRECT: FeedbackMessages.MessageClasses.INCORRECT_SOLUTION,
    }

    # CSS classes for problem-level feedback; only fully-correct gets one.
    PROBLEM_FEEDBACK_CLASSES = {
        SOLUTION_CORRECT: FeedbackMessages.MessageClasses.CORRECT_SOLUTION,
        SOLUTION_PARTIAL: None,
        SOLUTION_INCORRECT: None
    }

    display_name = String(
        display_name=_("Title"),
        help=
        _("The title of the drag and drop problem. The title is displayed to learners."
          ),
        scope=Scope.settings,
        default=_("Drag and Drop"),
        enforce_type=True,
    )

    mode = String(
        display_name=_("Mode"),
        help=_(
            "Standard mode: the problem provides immediate feedback each time "
            "a learner drops an item on a target zone. "
            "Assessment mode: the problem provides feedback only after "
            "a learner drops all available items on target zones."),
        scope=Scope.settings,
        values=[
            {
                "display_name": _("Standard"),
                "value": Constants.STANDARD_MODE
            },
            {
                "display_name": _("Assessment"),
                "value": Constants.ASSESSMENT_MODE
            },
        ],
        default=Constants.STANDARD_MODE,
        enforce_type=True,
    )

    # None (the default) means unlimited attempts.
    max_attempts = Integer(
        display_name=_("Maximum attempts"),
        help=_(
            "Defines the number of times a student can try to answer this problem. "
            "If the value is not set, infinite attempts are allowed."),
        scope=Scope.settings,
        default=None,
        enforce_type=True,
    )

    show_title = Boolean(
        display_name=_("Show title"),
        help=_("Display the title to the learner?"),
        scope=Scope.settings,
        default=True,
        enforce_type=True,
    )

    question_text = String(
        display_name=_("Problem text"),
        help=
        _("The description of the problem or instructions shown to the learner."
          ),
        scope=Scope.settings,
        default="",
        enforce_type=True,
    )

    show_question_header = Boolean(
        display_name=_('Show "Problem" heading'),
        help=_('Display the heading "Problem" above the problem text?'),
        scope=Scope.settings,
        default=True,
        enforce_type=True,
    )

    weight = Float(
        display_name=_("Problem Weight"),
        help=_("Defines the number of points the problem is worth."),
        scope=Scope.settings,
        default=1,
        enforce_type=True,
    )

    item_background_color = String(
        display_name=_("Item background color"),
        help=
        _("The background color of draggable items in the problem (example: 'blue' or '#0000ff')."
          ),
        scope=Scope.settings,
        default="",
        enforce_type=True,
    )

    item_text_color = String(
        display_name=_("Item text color"),
        help=
        _("Text color to use for draggable items (example: 'white' or '#ffffff')."
          ),
        scope=Scope.settings,
        default="",
        enforce_type=True,
    )

    # None (the default) means no per-zone limit.
    max_items_per_zone = Integer(
        display_name=_("Maximum items per zone"),
        help=
        _("This setting limits the number of items that can be dropped into a single zone."
          ),
        scope=Scope.settings,
        default=None,
        enforce_type=True,
    )

    # Authored problem definition (zones, items, feedback, background image).
    data = Dict(
        display_name=_("Problem data"),
        help=
        _("Information about zones, items, feedback, and background image for this problem. "
          "This information is derived from the input that a course author provides via the interactive editor "
          "when configuring the problem."),
        scope=Scope.content,
        default=DEFAULT_DATA,
        enforce_type=True,
    )

    item_state = Dict(
        help=
        _("Information about current positions of items that a learner has dropped on the target image."
          ),
        scope=Scope.user_state,
        default={},
        enforce_type=True,
    )

    attempts = Integer(
        help=_("Number of attempts learner used"),
        scope=Scope.user_state,
        default=0,
        enforce_type=True,
    )

    completed = Boolean(
        help=
        _("Indicates whether a learner has completed the problem at least once"
          ),
        scope=Scope.user_state,
        default=False,
        enforce_type=True,
    )

    # Legacy weighted score; superseded by raw_earned (see help text).
    grade = Float(help=_(
        "DEPRECATED. Keeps maximum score achieved by student as a weighted value."
    ),
                  scope=Scope.user_state,
                  default=0)

    raw_earned = Float(
        help=
        _("Keeps maximum score achieved by student as a raw value between 0 and 1."
          ),
        scope=Scope.user_state,
        default=0,
        enforce_type=True,
    )

    block_settings_key = 'drag-and-drop-v2'

    def max_score(self):  # pylint: disable=no-self-use
        """
        Return the problem's max score, which for DnDv2 always equals 1.
        Required by the grading system in the LMS. All scores are stored as
        raw values in [0, 1] and scaled by `weight` when reported.
        """
        return 1

    def get_score(self):
        """
        Return the problem's current score as raw values.
        """
        # Lazily initialize raw_earned from the learner's current item state
        # the first time a score is requested.
        saved_raw = self._get_raw_earned_if_set()
        if saved_raw is None:
            self.raw_earned = self._learner_raw_score()
        return Score(self.raw_earned, self.max_score())

    def set_score(self, score):
        """
        Set the score on this block from a Score namedtuple.

        The namedtuple carries a raw score and a raw possible; for this
        block raw_possible is expected to always equal max_score() (i.e. 1).
        """
        expected_possible = self.max_score()
        assert score.raw_possible == expected_possible
        self.raw_earned = score.raw_earned

    def calculate_score(self):
        """
        Returns a newly-calculated raw score on the problem for the learner
        based on the learner's current state.
        """
        # Unlike get_score, this never caches anything on the block.
        return Score(self._learner_raw_score(), self.max_score())

    def has_submitted_answer(self):
        """
        Returns True if the user has made a submission.
        """
        # Either the current raw score or the legacy 'grade' field being
        # explicitly set counts as a submission. any() short-circuits just
        # like the original 'or'.
        return any(
            self.fields[name].is_set_on(self)
            for name in ('raw_earned', 'grade')
        )

    def weighted_grade(self):
        """
        Returns the block's current saved grade multiplied by the block's
        weight - the number of points earned by the learner.
        """
        return self.raw_earned * self.weight

    def _learner_raw_score(self):
        """
        Calculate the raw score for the learner's submission.

        Computed as the ratio of correctly placed items (or, for decoys,
        items correctly left in the bank) to the total number of items, so
        the result always lies in the interval [0..1].
        """
        correct, total = self._get_item_stats()
        # float() keeps the division exact under Python 2 semantics.
        return correct / float(total)

    @staticmethod
    def _get_statici18n_js_url():
        """
        Returns the Javascript translation file for the currently selected language, if any found by
        `pkg_resources`
        """
        lang_code = translation.get_language()
        if not lang_code:
            return None
        text_js = 'public/js/translations/{lang_code}/text.js'
        # Try the full code first (e.g. 'pt-br'), then the base language
        # ('pt'). The original misleadingly called the base a "country code".
        base_lang = lang_code.split('-')[0]
        for candidate in (lang_code, base_lang):
            candidate_path = text_js.format(lang_code=candidate)
            if pkg_resources.resource_exists(loader.module_name,
                                             candidate_path):
                return candidate_path
        return None

    @XBlock.supports(
        "multi_device"
    )  # Enable this block for use in the mobile app via webview
    def student_view(self, context):
        """
        Player view, displayed to the student
        """
        fragment = Fragment()
        fragment.add_content(
            loader.render_django_template('/templates/html/drag_and_drop.html',
                                          i18n_service=self.i18n_service))

        scripts = [
            'public/js/vendor/virtual-dom-1.3.0.min.js',
            'public/js/drag_and_drop.js',
        ]
        # Append the static translation bundle, when one exists for the
        # active language.
        translations_js = self._get_statici18n_js_url()
        if translations_js:
            scripts.append(translations_js)

        fragment.add_css_url(
            self.runtime.local_resource_url(self,
                                            'public/css/drag_and_drop.css'))
        for script in scripts:
            fragment.add_javascript_url(
                self.runtime.local_resource_url(self, script))

        self.include_theme_files(fragment)

        fragment.initialize_js('DragAndDropBlock', self.student_view_data())

        return fragment

    def student_view_data(self, context=None):
        """
        Get the configuration data for the student_view.

        The configuration is all the settings defined by the author, except
        for correct answers and feedback, which must never be sent to the
        client.
        """
        def items_without_answers():
            """
            Return a deep copy of the items with feedback and answers removed.
            """
            # Default to a list for consistency with studio_view; the
            # original used '' here, which only worked because iterating an
            # empty string yields nothing.
            items = copy.deepcopy(self.data.get('items', []))
            for item in items:
                del item['feedback']
                # Use item.pop to remove both `item['zone']` and `item['zones']`; we don't have
                # a guarantee that either will be present, so we can't use `del`. Legacy instances
                # will have `item['zone']`, while current versions will have `item['zones']`.
                item.pop('zone', None)
                item.pop('zones', None)
                # Fall back on "backgroundImage" to be backward-compatible.
                image_url = item.get('imageURL') or item.get('backgroundImage')
                if image_url:
                    item['expandedImageURL'] = self._expand_static_url(
                        image_url)
                else:
                    item['expandedImageURL'] = ''
            return items

        return {
            "block_id": six.text_type(self.scope_ids.usage_id),
            "display_name": self.display_name,
            "type": self.CATEGORY,
            "weight": self.weight,
            "mode": self.mode,
            "zones": self.zones,
            "max_attempts": self.max_attempts,
            "graded": getattr(self, 'graded', False),
            "weighted_max_score": self.max_score() * self.weight,
            "max_items_per_zone": self.max_items_per_zone,
            # SDK doesn't supply url_name.
            "url_name": getattr(self, 'url_name', ''),
            "display_zone_labels": self.data.get('displayLabels', False),
            "display_zone_borders": self.data.get('displayBorders', False),
            "items": items_without_answers(),
            "title": self.display_name,
            "show_title": self.show_title,
            "problem_text": self.question_text,
            "show_problem_header": self.show_question_header,
            "target_img_expanded_url": self.target_img_expanded_url,
            "target_img_description": self.target_img_description,
            "item_background_color": self.item_background_color or None,
            "item_text_color": self.item_text_color or None,
            "has_deadline_passed": self.has_submission_deadline_passed,
            # final feedback (data.feedback.finish) is not included - it may give away answers.
        }

    def studio_view(self, context):
        """
        Editing view in Studio.

        Renders the interactive drag-and-drop editor, wiring in the
        Handlebars templates, this block's current data, and any static
        translation bundle for the active language.
        """
        js_templates = loader.load_unicode('/templates/html/js_templates.html')
        # Get an 'id_suffix' string that is unique for this block.
        # We append it to HTML element ID attributes to ensure multiple instances of the DnDv2 block
        # on the same page don't share the same ID value.
        # We avoid using ID attributes in preference to classes, but sometimes we still need IDs to
        # connect 'for' and 'aria-describedby' attributes to the associated elements.
        id_suffix = self._get_block_id()
        js_templates = js_templates.replace('{{id_suffix}}', id_suffix)
        context = {
            'js_templates': js_templates,
            'id_suffix': id_suffix,
            'fields': self.fields,
            'self': self,
            'data': six.moves.urllib.parse.quote(json.dumps(self.data)),
        }

        fragment = Fragment()
        fragment.add_content(
            loader.render_django_template(
                '/templates/html/drag_and_drop_edit.html',
                context=context,
                i18n_service=self.i18n_service))
        css_urls = ('public/css/drag_and_drop_edit.css', )
        js_urls = [
            'public/js/vendor/handlebars-v1.1.2.js',
            'public/js/drag_and_drop_edit.js',
        ]

        statici18n_js_url = self._get_statici18n_js_url()
        if statici18n_js_url:
            js_urls.append(statici18n_js_url)

        for css_url in css_urls:
            fragment.add_css_url(self.runtime.local_resource_url(
                self, css_url))
        for js_url in js_urls:
            fragment.add_javascript_url(
                self.runtime.local_resource_url(self, js_url))

        # Do a bit of manipulation so we get the appearance of a list of zone options on
        # items that still have just a single zone stored

        items = self.data.get('items', [])

        for item in items:
            zones = self.get_item_zones(item['id'])
            # Note that we appear to be mutating the state of the XBlock here, but because
            # the change won't be committed, we're actually just affecting the data that
            # we're going to send to the client, not what's saved in the backing store.
            item['zones'] = zones
            item.pop('zone', None)

        fragment.initialize_js(
            'DragAndDropEditBlock', {
                'data': self.data,
                'target_img_expanded_url': self.target_img_expanded_url,
                'default_background_image_url':
                self.default_background_image_url,
            })

        return fragment

    @XBlock.json_handler
    def studio_submit(self, submissions, suffix=''):
        """
        Handles studio save.
        """
        # Fields copied verbatim from the editor payload, in the same order
        # as before (block attribute <- editor key).
        for attr, key in (
                ('display_name', 'display_name'),
                ('mode', 'mode'),
                ('max_attempts', 'max_attempts'),
                ('show_title', 'show_title'),
                ('question_text', 'problem_text'),
                ('show_question_header', 'show_problem_header'),
        ):
            setattr(self, attr, submissions[key])
        # Fields needing conversion or validation.
        self.weight = float(submissions['weight'])
        self.item_background_color = submissions['item_background_color']
        self.item_text_color = submissions['item_text_color']
        self.max_items_per_zone = self._get_max_items_per_zone(submissions)
        self.data = submissions['data']

        return {
            'result': 'success',
        }

    def _get_block_id(self):
        """
        Return unique ID of this block. Useful for HTML ID attributes.
        Works both in LMS/Studio and workbench runtimes:
        - In LMS/Studio, use the location.html_id method.
        - In the workbench, use the usage_id.
        """
        # EAFP: the attribute lookup fails exactly when 'location' is absent.
        try:
            html_id = self.location.html_id  # pylint: disable=no-member
        except AttributeError:
            return six.text_type(self.scope_ids.usage_id)
        return html_id()

    @staticmethod
    def _get_max_items_per_zone(submissions):
        """
        Parses Max items per zone value coming from editor.

        Returns:
            * None if invalid value is passed (i.e. not an integer)
            * None if value is parsed into zero or negative integer
            * Positive integer otherwise.

        Examples:
            * _get_max_items_per_zone(None) -> None
            * _get_max_items_per_zone('string') -> None
            * _get_max_items_per_zone('-1') -> None
            * _get_max_items_per_zone(-1) -> None
            * _get_max_items_per_zone('0') -> None
            * _get_max_items_per_zone('') -> None
            * _get_max_items_per_zone('42') -> 42
            * _get_max_items_per_zone(42) -> 42
        """
        raw_max_items_per_zone = submissions.get('max_items_per_zone', None)

        # Entries that aren't numbers should be treated as null. We assume that if we can
        # turn it into an int, a number was submitted.
        try:
            max_attempts = int(raw_max_items_per_zone)
            if max_attempts > 0:
                return max_attempts
            else:
                return None
        except (ValueError, TypeError):
            return None

    @XBlock.json_handler
    def drop_item(self, item_attempt, suffix=''):
        """
        Handles dropping item into a zone.

        Dispatches to the assessment- or standard-mode implementation
        depending on the block's configured mode.

        Raises:
            JsonHandlerError: 500 if the block's mode is not a known DnDv2
                mode (misconfigured course).
        """
        self._validate_drop_item(item_attempt)

        if self.mode == Constants.ASSESSMENT_MODE:
            return self._drop_item_assessment(item_attempt)
        elif self.mode == Constants.STANDARD_MODE:
            return self._drop_item_standard(item_attempt)
        else:
            # BUG FIX: the format string uses a named {mode} placeholder, so
            # the value must be passed as a keyword argument. Passing it
            # positionally raised KeyError('mode') instead of producing the
            # intended error message.
            raise JsonHandlerError(
                500,
                self.i18n_service.gettext(
                    "Unknown DnDv2 mode {mode} - course is misconfigured").
                format(mode=self.mode))

    @XBlock.json_handler
    def do_attempt(self, data, suffix=''):
        """
        Checks submitted solution and returns feedback.

        Returns a dict with correctness, attempt count, weighted grade,
        misplaced item ids, and item-level plus overall feedback messages.

        Raises:
             * JsonHandlerError with 400 error code in standard mode.
             * JsonHandlerError with 409 error code if no more attempts left
        """
        self._validate_do_attempt()

        self.attempts += 1
        # pylint: disable=fixme
        # TODO: Refactor this method to "freeze" item_state and pass it to methods that need access to it.
        # These implicit dependencies between methods exist because most of them use `item_state` or other
        # fields, either as an "input" (i.e. read value) or as output (i.e. set value) or both. As a result,
        # incorrect order of invocation causes issues:
        self._mark_complete_and_publish_grade(
        )  # must happen before _get_feedback - sets grade
        correct = self._is_answer_correct(
        )  # must happen before manipulating item_state - reads item_state

        overall_feedback_msgs, misplaced_ids = self._get_feedback(
            include_item_feedback=True)

        misplaced_items = []
        for item_id in misplaced_ids:
            # Don't delete misplaced item states on the final attempt.
            # (This keeps the learner's last placements visible afterwards.)
            if self.attempts_remain:
                del self.item_state[item_id]
            misplaced_items.append(self._get_item_definition(int(item_id)))

        # One "incorrect" feedback message per misplaced item.
        feedback_msgs = [
            FeedbackMessage(item['feedback']['incorrect'], None)
            for item in misplaced_items
        ]
        return {
            'correct': correct,
            'attempts': self.attempts,
            'grade': self._get_weighted_earned_if_set(),
            'misplaced_items': list(misplaced_ids),
            'feedback': self._present_feedback(feedback_msgs),
            'overall_feedback': self._present_feedback(overall_feedback_msgs)
        }

    @XBlock.json_handler
    def publish_event(self, data, suffix=''):
        """
        Handler to publish XBlock event from frontend
        """
        # LBYL variant of the original try/except around data.pop().
        if 'event_type' not in data:
            return {
                'result': 'error',
                'message': 'Missing event_type in JSON data'
            }
        # pop() so the remaining payload is published without the type key.
        event_type = data.pop('event_type')

        self.runtime.publish(self, event_type, data)
        return {'result': 'success'}

    @XBlock.json_handler
    def reset(self, data, suffix=''):
        """
        Resets problem to initial state
        """
        # Only the drop placements are cleared; the attempts counter and
        # earned-score fields are not touched here.
        self.item_state = {}
        return self._get_user_state()

    @XBlock.json_handler
    def show_answer(self, data, suffix=''):
        """
        Returns correct answer in assessment mode.

        Raises:
             * JsonHandlerError with 400 error code in standard mode.
             * JsonHandlerError with 409 error code if there are still attempts left
        """
        gettext = self.i18n_service.gettext
        if self.mode != Constants.ASSESSMENT_MODE:
            message = gettext(
                "show_answer handler should only be called for assessment mode"
            )
            raise JsonHandlerError(400, message)
        if self.attempts_remain:
            raise JsonHandlerError(
                409, gettext("There are attempts remaining"))

        return self._get_correct_state()

    @XBlock.json_handler
    def expand_static_url(self, url, suffix=''):
        """ AJAX endpoint that resolves a static [image] URL to its full form """
        expanded = self._expand_static_url(url)
        return {'url': expanded}

    @property
    def i18n_service(self):
        """ Translation service from the runtime, or a no-op fallback. """
        return self.runtime.service(self, "i18n") or DummyTranslationService()

    @property
    def target_img_expanded_url(self):
        """ Expanded URL of the background image that items are dragged onto. """
        custom_img = self.data.get("targetImg")
        if not custom_img:
            return self.default_background_image_url
        return self._expand_static_url(custom_img)

    @property
    def target_img_description(self):
        """ Author-supplied textual description of the target (background) image. """
        # Empty string when the author never set a description.
        return self.data.get("targetImgDescription", "")

    @property
    def default_background_image_url(self):
        """ URL of the bundled fallback background image (no custom one set). """
        return self.runtime.local_resource_url(self, "public/img/triangle.png")

    @property
    def attempts_remain(self):
        """
        True when the learner may still submit: attempts are unlimited
        (max_attempts unset or zero) or the limit has not been reached yet.
        """
        if not self.max_attempts:  # None or 0 both mean "unlimited"
            return True
        return self.attempts < self.max_attempts

    @property
    def has_submission_deadline_passed(self):
        """
        Returns a boolean indicating if the submission is past its deadline.

        Delegates to `has_deadline_passed`, which InheritanceMixin adds in
        LMS/Studio. A pure DragAndDrop XBlock install lacks that method, in
        which case we report False so deadline checks never interfere with
        other functionality.
        """
        deadline_check = getattr(self, "has_deadline_passed", None)
        if deadline_check is None:
            return False
        return deadline_check()  # pylint: disable=no-member

    @XBlock.handler
    def student_view_user_state(self, request, suffix=''):
        """ GET all user-specific data, and any applicable feedback """
        payload = json.dumps(self._get_user_state()).encode('utf-8')
        return webob.Response(body=payload, content_type='application/json')

    def _validate_do_attempt(self):
        """
        Guard for the `do_attempt` handler: raises JsonHandlerError when the
        handler must not run (wrong mode, no attempts left, or past deadline).
        """
        if self.mode != Constants.ASSESSMENT_MODE:
            message = self.i18n_service.gettext(
                "do_attempt handler should only be called for assessment mode"
            )
            raise JsonHandlerError(400, message)
        if not self.attempts_remain:
            message = self.i18n_service.gettext("Max number of attempts reached")
            raise JsonHandlerError(409, message)
        if self.has_submission_deadline_passed:
            message = self.i18n_service.gettext("Submission deadline has passed.")
            raise JsonHandlerError(409, message)

    def _get_feedback(self, include_item_feedback=False):
        """
        Builds overall feedback for both standard and assessment modes

        Returns:
            tuple: (feedback_msgs, misplaced_ids)
                * feedback_msgs - list of FeedbackMessage tuples to present
                * misplaced_ids - IDs of items placed in a wrong zone (empty
                  set in standard mode / before the first assessment attempt)
        """
        answer_correctness = self._answer_correctness()
        is_correct = answer_correctness == self.SOLUTION_CORRECT

        # Standard mode - and assessment mode before any attempt was made -
        # only shows the authored 'start'/'finish' message, no item breakdown.
        if self.mode == Constants.STANDARD_MODE or not self.attempts:
            feedback_key = 'finish' if is_correct else 'start'
            return [
                FeedbackMessage(self.data['feedback'][feedback_key], None)
            ], set()

        items = self._get_item_raw_stats()
        missing_ids = items.required - items.placed
        misplaced_ids = items.placed - items.correctly_placed

        feedback_msgs = []

        def _add_msg_if_exists(ids_list, message_template, message_class):
            """ Adds message to feedback messages if corresponding items list is not empty """
            if ids_list:
                # message_template is an ngettext-style factory: it picks the
                # singular/plural wording based on the count.
                message = message_template(len(ids_list),
                                           self.i18n_service.ngettext)
                feedback_msgs.append(FeedbackMessage(message, message_class))

        if self.item_state or include_item_feedback:
            _add_msg_if_exists(
                items.correctly_placed, FeedbackMessages.correctly_placed,
                FeedbackMessages.MessageClasses.CORRECTLY_PLACED)

            # Misplaced items are not returned to the bank on the final attempt.
            if self.attempts_remain:
                misplaced_template = FeedbackMessages.misplaced_returned
            else:
                misplaced_template = FeedbackMessages.misplaced

            _add_msg_if_exists(misplaced_ids, misplaced_template,
                               FeedbackMessages.MessageClasses.MISPLACED)
            _add_msg_if_exists(missing_ids, FeedbackMessages.not_placed,
                               FeedbackMessages.MessageClasses.NOT_PLACED)

        # Authored problem-level message: 'start' while the learner can still
        # improve, 'finish' once done (everything placed or attempts exhausted).
        if self.attempts_remain and (misplaced_ids or missing_ids):
            problem_feedback_message = self.data['feedback']['start']
        else:
            problem_feedback_message = self.data['feedback']['finish']

        problem_feedback_class = self.PROBLEM_FEEDBACK_CLASSES.get(
            answer_correctness, None)
        grade_feedback_class = self.GRADE_FEEDBACK_CLASSES.get(
            answer_correctness, None)

        feedback_msgs.append(
            FeedbackMessage(problem_feedback_message, problem_feedback_class))

        # Only graded problems (weight > 0) get a grade summary line.
        if self.weight > 0:
            if self.attempts_remain:
                grade_feedback_template = FeedbackMessages.GRADE_FEEDBACK_TPL
            else:
                grade_feedback_template = FeedbackMessages.FINAL_ATTEMPT_TPL

            feedback_msgs.append(
                FeedbackMessage(
                    self.i18n_service.gettext(grade_feedback_template).format(
                        score=self.weighted_grade()), grade_feedback_class))

        return feedback_msgs, misplaced_ids

    @staticmethod
    def _present_feedback(feedback_messages):
        """
        Transforms feedback messages into format expected by frontend code
        """
        return [{
            "message": msg.message,
            "message_class": msg.message_class
        } for msg in feedback_messages if msg.message]

    def _drop_item_standard(self, item_attempt):
        """
        Handles dropping item to a zone in standard mode.

        Returns a dict with correctness, grade, completion flag, and the
        per-item plus overall feedback to render.
        """
        item = self._get_item_definition(item_attempt['val'])

        is_correct = self._is_attempt_correct(
            item_attempt)  # Student placed item in a correct zone
        if is_correct:  # In standard mode state is only updated when attempt is correct
            self.item_state[str(item['id'])] = self._make_state_from_attempt(
                item_attempt, is_correct)

        self._mark_complete_and_publish_grade(
        )  # must happen before _get_feedback
        self._publish_item_dropped_event(item_attempt, is_correct)

        item_feedback_key = 'correct' if is_correct else 'incorrect'
        # Per-item feedback may reference static assets - expand those URLs.
        item_feedback = FeedbackMessage(
            self._expand_static_url(item['feedback'][item_feedback_key]), None)
        overall_feedback, __ = self._get_feedback()

        return {
            'correct': is_correct,
            'grade': self._get_weighted_earned_if_set(),
            'finished': self._is_answer_correct(),
            'overall_feedback': self._present_feedback(overall_feedback),
            'feedback': self._present_feedback([item_feedback])
        }

    def _drop_item_assessment(self, item_attempt):
        """
        Handles dropping item into a zone in assessment mode.

        A `zone` of None means the learner dragged the item back to the bank.
        """
        if not self.attempts_remain:
            raise JsonHandlerError(
                409,
                self.i18n_service.gettext("Max number of attempts reached"))

        item = self._get_item_definition(item_attempt['val'])
        is_correct = self._is_attempt_correct(item_attempt)
        item_key = str(item['id'])

        if item_attempt['zone'] is None:
            # Returned to the bank - forget any recorded placement.
            self.item_state.pop(item_key, None)
            self._publish_item_to_bank_event(item['id'], is_correct)
        else:
            # Assessment mode records every placement (even wrong ones) so
            # intermediate item positions survive page reloads.
            self.item_state[item_key] = self._make_state_from_attempt(
                item_attempt, is_correct)
            self._publish_item_dropped_event(item_attempt, is_correct)

        return {}

    def _validate_drop_item(self, item):
        """
        Validates `drop_item` parameters. Assessment mode allows returning
        items to the bank, so no zone check is performed there.
        """
        if self.mode == Constants.ASSESSMENT_MODE:
            return
        if not self._get_zone_by_uid(item['zone']):
            raise JsonHandlerError(400, "Item zone data invalid.")

    @staticmethod
    def _make_state_from_attempt(attempt, correct):
        """
        Converts "attempt" data coming from browser into "state" entry stored in item_state
        """
        return {'zone': attempt['zone'], 'correct': correct}

    def _mark_complete_and_publish_grade(self):
        """
        Helper method to update `self.completed` and submit grade event if appropriate conditions met.
        """
        # pylint: disable=fixme
        # TODO: (arguable) split this method into "clean" functions (with no side effects and implicit state)
        # This method implicitly depends on self.item_state (via _is_answer_correct and _learner_raw_score)
        # and also updates self.raw_earned if some conditions are met. As a result this method implies some order of
        # invocation:
        # * it should be called after learner-caused updates to self.item_state is applied
        # * it should be called before self.item_state cleanup is applied (i.e. returning misplaced items to item bank)
        # * it should be called before any method that depends on self.raw_earned (i.e. self._get_feedback)

        # Splitting it into a "clean" functions will allow to capture this implicit invocation order in caller method
        # and help avoid bugs caused by invocation order violation in future.

        # There's no going back from "completed" status to "incomplete"
        self.completed = self.completed or self._is_answer_correct(
        ) or not self.attempts_remain
        current_raw_earned = self._learner_raw_score()
        # ... and from higher grade to lower
        # if we have an old-style (i.e. unreliable) grade, override no matter what
        saved_raw_earned = self._get_raw_earned_if_set()

        # Either side being None means there is no reliable grade to compare
        # against, so the freshly computed score is treated as an improvement.
        current_raw_earned_is_greater = False
        if current_raw_earned is None or saved_raw_earned is None:
            current_raw_earned_is_greater = True

        if current_raw_earned is not None and saved_raw_earned is not None and current_raw_earned > saved_raw_earned:
            current_raw_earned_is_greater = True

        if current_raw_earned is None or current_raw_earned_is_greater:
            self.raw_earned = current_raw_earned
            self._publish_grade(Score(self.raw_earned, self.max_score()))

        # and no matter what - emit progress event for current user
        self.runtime.publish(self, "progress", {})

    def _publish_item_dropped_event(self, attempt, is_correct):
        """
        Emits the 'edx.drag_and_drop_v2.item.dropped' event for a placement
        onto a zone.
        """
        item = self._get_item_definition(attempt['val'])
        # attempt was already validated upstream, so the zone must exist.
        zone = self._get_zone_by_uid(attempt['zone'])

        # Prefer the display name; fall back to the image URL for image items.
        item_label = item.get("displayName") or item.get("imageURL")

        event_data = {
            'item': item_label,
            'item_id': item['id'],
            'location': zone.get("title"),
            'location_id': zone.get("uid"),
            'is_correct': is_correct,
        }
        self.runtime.publish(self, 'edx.drag_and_drop_v2.item.dropped',
                             event_data)

    def _publish_item_to_bank_event(self, item_id, is_correct):
        """
        Emits the 'item.dropped' event when an item is moved back to the
        bank in assessment mode (location_id -1 marks the bank).
        """
        item = self._get_item_definition(item_id)

        # Prefer the display name; fall back to the image URL for image items.
        item_label = item.get("displayName") or item.get("imageURL")

        event_data = {
            'item': item_label,
            'item_id': item['id'],
            'location': 'item bank',
            'location_id': -1,
            'is_correct': is_correct,
        }
        self.runtime.publish(self, 'edx.drag_and_drop_v2.item.dropped',
                             event_data)

    def _is_attempt_correct(self, attempt):
        """
        Check whether this attempt handled the item correctly.

        In assessment mode, leaving a decoy item (one with no valid zones)
        in the bank counts as correct.
        """
        valid_zones = self.get_item_zones(attempt['val'])
        decoy_kept_in_bank = (
            valid_zones == [] and
            attempt['zone'] is None and
            self.mode == Constants.ASSESSMENT_MODE
        )
        if decoy_kept_in_bank:
            return True
        return attempt['zone'] in valid_zones

    def _expand_static_url(self, url):
        """
        This is required to make URLs like '/static/dnd-test-image.png' work (note: that is the
        only portable URL format for static files that works across export/import and reruns).
        This method is unfortunately a bit hackish since XBlock does not provide a low-level API
        for this.
        """
        if hasattr(self.runtime, 'replace_urls'):
            # The URL is wrapped in quotes so the replacer treats it as an
            # attribute value; the [1:-1] slice strips those quotes again.
            url = self.runtime.replace_urls(u'"{}"'.format(url))[1:-1]
        elif hasattr(self.runtime, 'course_id'):
            # edX Studio uses a different runtime for 'studio_view' than 'student_view',
            # and the 'studio_view' runtime doesn't provide the replace_urls API.
            try:
                from static_replace import replace_static_urls  # pylint: disable=import-error
                url = replace_static_urls(
                    u'"{}"'.format(url),
                    None,
                    course_id=self.runtime.course_id)[1:-1]
            except ImportError:
                # Best-effort: outside the LMS/Studio environment the helper
                # is unavailable, so the URL is returned unexpanded.
                pass
        return url

    def _get_user_state(self):
        """ Get all user-specific data, and any applicable feedback """
        item_state = self._get_item_state()
        if self.mode == Constants.ASSESSMENT_MODE:
            # Don't leak per-item correctness to the frontend in assessment
            # mode - strip the "correct" flag from the (copied) state.
            for state_entry in item_state.values():
                del state_entry["correct"]

        overall_feedback_msgs, __ = self._get_feedback()
        if self.mode == Constants.STANDARD_MODE:
            is_finished = self._is_answer_correct()
        else:
            is_finished = not self.attempts_remain

        return {
            'items': item_state,
            'finished': is_finished,
            'attempts': self.attempts,
            'grade': self._get_weighted_earned_if_set(),
            'overall_feedback': self._present_feedback(overall_feedback_msgs)
        }

    def _get_correct_state(self):
        """
        Returns one of the possible correct states for the configured data.
        """
        state = {}
        for item in copy.deepcopy(self.data.get('items', [])):
            zones = item.get('zones')

            if zones is None:
                # Backwards compatibility: legacy single-zone format, where
                # 'none' marks a decoy item with no valid zone.
                legacy_zone = item.get('zone')
                if legacy_zone is not None and legacy_zone != 'none':
                    zones = [legacy_zone]
                else:
                    zones = []

            if zones:
                state[str(item['id'])] = {
                    'zone': zones.pop(),
                    'correct': True,
                }

        return {'items': state}

    def _get_item_state(self):
        """
        Returns a fresh dict copy of the user item state, with legacy
        tuple-form entries converted via the state migrations.
        """
        # IMPORTANT: always return a COPY of self.item_state - callers such
        # as the student_view_user_state handler mutate the returned data to
        # hide correctness of placed items.
        migrator = StateMigration(self)
        return {
            item_id: migrator.apply_item_state_migrations(item_id, item)
            for item_id, item in six.iteritems(self.item_state)
        }

    def _get_item_definition(self, item_id):
        """
        Returns definition (settings) for item identified by `item_id`.
        Raises StopIteration if no such item is configured.
        """
        matches = (item for item in self.data['items'] if item['id'] == item_id)
        return next(matches)

    def get_item_zones(self, item_id):
        """
        Returns a list of the zones that are valid options for the item.

        A configured zone list wins; otherwise a single configured zone
        (other than 'none') is wrapped in a list; otherwise the item has no
        valid zones and an empty list is returned.
        """
        item = self._get_item_definition(item_id)
        zone_list = item.get('zones')
        if zone_list is not None:
            return zone_list
        single_zone = item.get('zone')
        if single_zone is not None and single_zone != 'none':
            return [single_zone]
        return []

    @property
    def zones(self):
        """
        Get drop zone data, defined by the author.
        """
        # Zones authored in the old format are converted on the fly.
        migrator = StateMigration(self)
        raw_zones = self.data.get('zones', [])
        return [migrator.apply_zone_migrations(zone) for zone in raw_zones]

    def _get_zone_by_uid(self, uid):
        """
        Given a zone UID, return that zone, or None.
        """
        return next((zone for zone in self.zones if zone["uid"] == uid), None)

    def _get_item_stats(self):
        """
        Returns (correct_count, total_count): the number of items currently
        handled correctly (placed right, or decoys left in the bank) versus
        the total number of items, decoys included.
        """
        stats = self._get_item_raw_stats()
        correct_count = len(stats.correctly_placed) + len(stats.decoy_in_bank)
        total_count = len(stats.required) + len(stats.decoy)
        return correct_count, total_count

    def _get_item_raw_stats(self):
        """
        Returns a named tuple containing required, decoy, placed, correctly
        placed, and correctly unplaced decoy items.

        Returns:
            namedtuple: (required, placed, correctly_placed, decoy, decoy_in_bank)
                * required - IDs of items that must be placed on the board
                * placed - IDs of items actually placed on the board
                * correctly_placed - IDs of items that were placed correctly
                * decoy - IDs of decoy items
                * decoy_in_bank - IDs of decoy items that were unplaced
        """
        item_state = self._get_item_state()

        all_items = {str(item['id']) for item in self.data['items']}
        required = {
            item_id for item_id in all_items
            if self.get_item_zones(int(item_id)) != []
        }
        placed = {item_id for item_id in all_items if item_id in item_state}
        correctly_placed = {
            item_id for item_id in placed if item_state[item_id]['correct']
        }
        decoy = all_items - required
        decoy_in_bank = {
            item_id for item_id in decoy if item_id not in item_state
        }

        return ItemStats(required, placed, correctly_placed, decoy,
                         decoy_in_bank)

    def _get_raw_earned_if_set(self):
        """
        Returns student's grade if already explicitly set, otherwise returns None.
        This is different from self.raw_earned which returns 0 by default.
        """
        is_set = self.fields['raw_earned'].is_set_on(self)
        return self.raw_earned if is_set else None

    def _get_weighted_earned_if_set(self):
        """
        Returns student's grade with the problem weight applied if set,
        otherwise None.
        """
        is_set = self.fields['raw_earned'].is_set_on(self)
        return self.weighted_grade() if is_set else None

    def _answer_correctness(self):
        """
        Classifies the current answer.

        Returns:
            string: one of SOLUTION_CORRECT (every item is handled
            correctly), SOLUTION_INCORRECT (none are) or SOLUTION_PARTIAL.
        """
        correct_count, total_count = self._get_item_stats()
        if correct_count == total_count:
            return self.SOLUTION_CORRECT
        if correct_count == 0:
            return self.SOLUTION_INCORRECT
        return self.SOLUTION_PARTIAL

    def _is_answer_correct(self):
        """
        Helper - True when the current answer is fully correct.
        """
        return self._answer_correctness() == self.SOLUTION_CORRECT

    @staticmethod
    def workbench_scenarios():
        """
        A canned scenario for display in the workbench.
        """
        return [
            ("Drag-and-drop-v2 standard",
             "<vertical_demo><drag-and-drop-v2/></vertical_demo>"),
            ("Drag-and-drop-v2 assessment",
             "<vertical_demo><drag-and-drop-v2 mode='assessment' max_attempts='3'/></vertical_demo>"
             ),
        ]
Code example #20
0
class CombinedOpenEndedFields(object):
    """
    Field declarations for the combined open-ended (open response
    assessment) module.

    Mixes author-facing configuration (Scope.settings / Scope.content)
    with per-student progress data (Scope.user_state).
    """
    display_name = String(
        display_name="Display Name",
        help=
        "This name appears in the horizontal navigation at the top of the page.",
        default="Open Response Assessment",
        scope=Scope.settings)
    # --- Per-student progress state (Scope.user_state) ---
    current_task_number = Integer(help="Current task that the student is on.",
                                  default=0,
                                  scope=Scope.user_state)
    old_task_states = List(help=(
        "A list of lists of state dictionaries for student states that are saved."
        "This field is only populated if the instructor changes tasks after"
        "the module is created and students have attempted it (for example changes a self assessed problem to "
        "self and peer assessed."),
                           scope=Scope.user_state)
    task_states = List(
        help="List of state dictionaries of each task within this module.",
        scope=Scope.user_state)
    state = String(
        help="Which step within the current task that the student is on.",
        default="initial",
        scope=Scope.user_state)
    graded = Boolean(
        display_name="Graded",
        help=
        'Defines whether the student gets credit for grading this problem.',
        default=False,
        scope=Scope.settings)
    student_attempts = Integer(
        help="Number of attempts taken by the student on this problem",
        default=0,
        scope=Scope.user_state)
    ready_to_reset = Boolean(
        help="If the problem is ready to be reset or not.",
        default=False,
        scope=Scope.user_state)
    # --- Author-facing settings (Scope.settings) ---
    max_attempts = Integer(
        display_name="Maximum Attempts",
        help="The number of times the student can try to answer this problem.",
        default=1,
        scope=Scope.settings,
        values={"min": 1})
    accept_file_upload = Boolean(
        display_name="Allow File Uploads",
        help="Whether or not the student can submit files as a response.",
        default=False,
        scope=Scope.settings)
    skip_spelling_checks = Boolean(
        display_name="Disable Quality Filter",
        help=
        "If False, the Quality Filter is enabled and submissions with poor spelling, short length, or poor grammar will not be peer reviewed.",
        default=False,
        scope=Scope.settings)
    due = Date(help="Date that this problem is due by", scope=Scope.settings)
    # Per-student due-date extension; stored in user_state (see help text).
    extended_due = Date(
        help="Date that this problem is due by for a particular student. This "
        "can be set by an instructor, and will override the global due "
        "date if it is set to a date that is later than the global due "
        "date.",
        default=None,
        scope=Scope.user_state,
    )
    graceperiod = Timedelta(
        help=
        "Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings)
    version = VersionInteger(help="Current version number",
                             default=DEFAULT_VERSION,
                             scope=Scope.settings)
    data = String(help="XML data for the problem",
                  scope=Scope.content,
                  default=DEFAULT_DATA)
    weight = Float(
        display_name="Problem Weight",
        help=
        "Defines the number of points each problem is worth. If the value is not set, each problem is worth one point.",
        scope=Scope.settings,
        values={
            "min": 0,
            "step": ".1"
        },
        default=1)
    # --- Peer-grading configuration (Scope.settings) ---
    min_to_calibrate = Integer(
        display_name="Minimum Peer Grading Calibrations",
        help=
        "The minimum number of calibration essays each student will need to complete for peer grading.",
        default=3,
        scope=Scope.settings,
        values={
            "min": 1,
            "max": 20,
            "step": "1"
        })
    max_to_calibrate = Integer(
        display_name="Maximum Peer Grading Calibrations",
        help=
        "The maximum number of calibration essays each student will need to complete for peer grading.",
        default=6,
        scope=Scope.settings,
        values={
            "min": 1,
            "max": 20,
            "step": "1"
        })
    peer_grader_count = Integer(
        display_name="Peer Graders per Response",
        help="The number of peers who will grade each submission.",
        default=3,
        scope=Scope.settings,
        values={
            "min": 1,
            "step": "1",
            "max": 5
        })
    required_peer_grading = Integer(
        display_name="Required Peer Grading",
        help=
        "The number of other students each student making a submission will have to grade.",
        default=3,
        scope=Scope.settings,
        values={
            "min": 1,
            "step": "1",
            "max": 5
        })
    peer_grade_finished_submissions_when_none_pending = Boolean(
        display_name='Allow "overgrading" of peer submissions',
        help=
        ("EXPERIMENTAL FEATURE.  Allow students to peer grade submissions that already have the requisite number of graders, "
         "but ONLY WHEN all submissions they are eligible to grade already have enough graders.  "
         "This is intended for use when settings for `Required Peer Grading` > `Peer Graders per Response`"
         ),
        default=False,
        scope=Scope.settings,
    )
    # Default Markdown body: an example essay prompt with rubric and tasks.
    markdown = String(help="Markdown source of this module",
                      default=textwrap.dedent("""\
                    [prompt]
                        <h3>Censorship in the Libraries</h3>

                        <p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
                        </p>

                        <p>
                        Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
                        </p>
                    [prompt]
                    [rubric]
                    + Ideas
                    - Difficult for the reader to discern the main idea.  Too brief or too repetitive to establish or maintain a focus.
                    - Attempts a main idea.  Sometimes loses focus or ineffectively displays focus.
                    - Presents a unifying theme or main idea, but may include minor tangents.  Stays somewhat focused on topic and task.
                    - Presents a unifying theme or main idea without going off on tangents.  Stays completely focused on topic and task.
                    + Content
                    - Includes little information with few or no details or unrelated details.  Unsuccessful in attempts to explore any facets of the topic.
                    - Includes little information and few or no details.  Explores only one or two facets of the topic.
                    - Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.)  Explores some facets of the topic.
                    - Includes in-depth information and exceptional supporting details that are fully developed.  Explores all facets of the topic.
                    + Organization
                    - Ideas organized illogically, transitions weak, and response difficult to follow.
                    - Attempts to logically organize ideas.  Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
                    - Ideas organized logically.  Progresses in an order that enhances meaning.  Includes smooth transitions.
                    + Style
                    - Contains limited vocabulary, with many words used incorrectly.  Demonstrates problems with sentence patterns.
                    - Contains basic vocabulary, with words that are predictable and common.  Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
                    - Includes vocabulary to make explanations detailed and precise.  Includes varied sentence patterns, including complex sentences.
                    + Voice
                    - Demonstrates language and tone that may be inappropriate to task and reader.
                    - Demonstrates an attempt to adjust language and tone to task and reader.
                    - Demonstrates effective adjustment of language and tone to task and reader.
                    [rubric]
                    [tasks]
                    (Self), ({4-12}AI), ({9-12}Peer)
                    [tasks]

        """),
                      scope=Scope.settings)
Code example #21
0
File: lms_mixin.py  Project: skim-ks/edx-ora2
class LmsCompatibilityMixin(object):
    """
    Extra fields and methods used by LMS/Studio.

    Mixed into an assessment XBlock so the LMS grading machinery
    (Progress page, grades download) and Studio's component listing can
    interact with it.
    """
    # Studio uses the default value of this field to show this XBlock
    # in the list of "Advanced Components"
    display_name = String(default="Peer Assessment",
                          scope=Scope.settings,
                          help="Display name")

    # None means "no start date" (the assignment is available immediately).
    start = DateTime(
        default=None,
        scope=Scope.settings,
        help=
        "ISO-8601 formatted string representing the start date of this assignment."
    )

    # None means "no due date".
    due = DateTime(
        default=None,
        scope=Scope.settings,
        help=
        "ISO-8601 formatted string representing the due date of this assignment."
    )

    # No explicit default: an unset weight means the problem is worth the
    # sum of its option point values (see the help text).
    weight = Float(
        display_name="Problem Weight",
        help=("Defines the number of points each problem is worth. "
              "If the value is not set, the problem is worth the sum of the "
              "option point values."),
        values={
            "min": 0,
            "step": .1
        },
        scope=Scope.settings)

    def has_dynamic_children(self):
        """Do we dynamically determine our children? No, we don't have any.

        The LMS wants to know this to see if it has to instantiate our module
        and query it to find the children, or whether it can just trust what's
        in the static (cheaper) children listing.
        """
        return False

    @property
    def has_score(self):
        """Are we a scored type (read: a problem). Yes.

        For LMS Progress page/grades download purposes, we're always going to
        have a score, even if it's just 0 at the start.
        """
        return True

    def max_score(self):
        """The maximum raw score of our problem.

        Called whenever the LMS knows that something is scorable, but finds no
        recorded raw score for it (i.e. the student hasn't done it). In that
        case, the LMS knows that the earned score is 0, but it doesn't know what
        to put in the denominator. So we supply it with the total number of
        points that it is possible for us to earn -- the sum of the highest
        pointed options from each criterion.

        Note that if we have already recorded a score in submissions, this
        method will never be called. So it's perfectly possible for us to have
        10/10 on the progress page and a 12 returning from this method if our
        10/10 score was earned in the past and the problem has changed since
        then.
        """
        # NOTE(review): relies on self.rubric_criteria, which is defined on
        # the XBlock this mixin is combined with -- confirm against that class.
        return sum(
            max(option["points"] for option in criterion["options"])
            for criterion in self.rubric_criteria)
コード例 #22
0
ファイル: slider.py プロジェクト: naeem91/problem-builder
class SliderBlock(
        SubmittingXBlockMixin,
        QuestionMixin,
        StudioEditableXBlockMixin,
        XBlockWithTranslationServiceMixin,
        StudentViewUserStateMixin,
        XBlock,
):
    """
    An XBlock used by students to indicate a numeric value on a sliding scale.
    The student's answer is always considered "correct".

    The stored answer is a float in [0, 1]; the UI maps it to a 0-100 slider.
    """
    CATEGORY = 'pb-slider'
    STUDIO_LABEL = _(u"Ranged Value Slider")
    USER_STATE_FIELDS = ['student_value']

    answerable = True

    min_label = String(
        display_name=_("Low"),
        help=_("Label for low end of the range"),
        scope=Scope.content,
        default=_("0%"),
    )
    max_label = String(
        display_name=_("High"),
        help=_("Label for high end of the range"),
        scope=Scope.content,
        default=_("100%"),
    )

    question = String(
        display_name=_("Question"),
        help=_("Question to ask the student (optional)"),
        scope=Scope.content,
        default="",
        multiline_editor=True,
    )

    student_value = Float(
        # The value selected by the student; None means "not yet answered".
        default=None,
        scope=Scope.user_state,
    )

    # 'display_name' and 'show_title' are presumably provided by the mixins
    # above -- they are not declared in this class.
    editable_fields = ('min_label', 'max_label', 'display_name', 'question',
                       'show_title')

    @property
    def url_name(self):
        """
        Get the url_name for this block. In Studio/LMS it is provided by a mixin, so we just
        defer to super(). In the workbench or any other platform, we use the name.
        """
        try:
            return super(SliderBlock, self).url_name
        except AttributeError:
            return self.name

    def mentoring_view(self, context):
        """ Main view of this block """
        context = context.copy() if context else {}
        context['question'] = self.question
        context['slider_id'] = 'pb-slider-{}'.format(uuid.uuid4().hex[:20])
        # Map the stored [0, 1] value to the 0-100 slider scale;
        # default the slider to the midpoint when unanswered.
        context['initial_value'] = int(
            self.student_value * 100) if self.student_value is not None else 50
        context['min_label'] = self.min_label
        context['max_label'] = self.max_label
        context['title'] = self.display_name_with_default
        context['hide_header'] = context.get('hide_header',
                                             False) or not self.show_title
        context['instructions_string'] = self._(
            "Select a value from {min_label} to {max_label}").format(
                min_label=self.min_label, max_label=self.max_label)
        html = loader.render_django_template('templates/html/slider.html',
                                             context)

        fragment = Fragment(html)
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/slider.js'))
        fragment.initialize_js('SliderBlock')
        return fragment

    # The same rendering is used for the LMS and for Studio preview.
    student_view = mentoring_view
    preview_view = mentoring_view

    def student_view_data(self, context=None):
        """Return static block data for the Course Blocks API (no user state)."""
        return {
            'id': self.name,
            'block_id': six.text_type(self.scope_ids.usage_id),
            'display_name': self.display_name_with_default,
            'type': self.CATEGORY,
            'question': self.question,
            'min_label': self.min_label,
            'max_label': self.max_label,
            'title': self.display_name_with_default,
            'hide_header': not self.show_title,
        }

    def author_view(self, context):
        """
        Add some HTML to the author view that allows authors to see the ID of the block, so they
        can refer to it in other blocks such as Plot blocks.
        """
        context[
            'hide_header'] = True  # Header is already shown in the Studio wrapper
        fragment = self.student_view(context)
        fragment.add_content(
            loader.render_django_template(
                'templates/html/slider_edit_footer.html',
                {"url_name": self.url_name}))
        return fragment

    def get_last_result(self):
        """ Return the current/last result in the required format """
        if self.student_value is None:
            return {}
        # Any submitted value counts as correct with a full score of 1.
        return {
            'submission': self.student_value,
            'status': 'correct',
            'tips': [],
            'weight': self.weight,
            'score': 1,
        }

    def get_results(self, _previous_result_unused=None):
        """ Alias for get_last_result() """
        return self.get_last_result()

    def submit(self, value):
        """Record the student's value (expected in [0, 1]) and return the result."""
        log.debug(u'Received Slider submission: "%s"', value)
        if value < 0 or value > 1:
            return {}  # Invalid
        self.student_value = value
        if sub_api:
            # Also send to the submissions API:
            sub_api.create_submission(self.student_item_key, value)
        result = self.get_last_result()
        log.debug(u'Slider submission result: %s', result)
        return result

    def get_submission_display(self, submission):
        """
        Get the human-readable version of a submission value
        """
        # Stored values are in [0, 1]; display them on the 0-100 scale.
        return submission * 100

    def validate_field_data(self, validation, data):
        """
        Validate this block's field data.
        """
        # No extra validation beyond what the mixins perform.
        super(SliderBlock, self).validate_field_data(validation, data)
コード例 #23
0
class FreeTextResponse(
        EnforceDueDates,
        MissingDataFetcherMixin,
        StudioEditableXBlockMixin,
        XBlock,
):
    #  pylint: disable=too-many-ancestors, too-many-instance-attributes
    """
    Enables instructors to create questions with free-text responses.

    Students earn full, half, or zero credit depending on whether their
    answer contains one of the configured key phrases (see
    ``_determine_credit``).
    """

    loader = ResourceLoader(__name__)

    @staticmethod
    def workbench_scenarios():
        """
        Gather scenarios to be displayed in the workbench
        """
        scenarios = [
            ('Free-text Response XBlock', '''<sequence_demo>
                    <freetextresponse />
                    <freetextresponse name='My First XBlock' />
                    <freetextresponse
                        display_name="Full Credit is asdf, half is fdsa"
                        fullcredit_keyphrases="['asdf']"
                        halfcredit_keyphrases="['fdsa']"
                        min_word_count="2"
                        max_word_count="2"
                        max_attempts="5"
                    />
                    <freetextresponse
                        display_name="Min words 2"
                        min_word_count="2"
                    />
                    <freetextresponse
                        display_name="Max Attempts 5 XBlock"
                        max_attempts="5"
                    />
                    <freetextresponse
                        display_name="Full credit is asdf, Max Attempts 3"
                        max_attempts="3"
                        min_word_count="2"
                        fullcredit_keyphrases="['asdf']"
                    />
                    <freetextresponse
                        display_name="New submitted message"
                        submitted_message="Different message"
                    />
                    <freetextresponse
                        display_name="Blank submitted message"
                        submitted_message=""
                    />
                    <freetextresponse
                        display_name="Display correctness if off"
                        display_correctness="False"
                    />
                </sequence_demo>
             '''),
        ]
        return scenarios

    display_correctness = Boolean(
        display_name=_('Display Correctness?'),
        help=_('This is a flag that indicates if the indicator '
               'icon should be displayed after a student enters '
               'their response'),
        default=True,
        scope=Scope.settings,
    )
    display_other_student_responses = Boolean(
        display_name=_('Display Other Student Responses'),
        help=_('This will display other student responses to the '
               'student after they submit their response.'),
        default=False,
        scope=Scope.settings,
    )
    # Shared pool of full-credit answers shown to other students.
    displayable_answers = List(
        default=[],
        scope=Scope.user_state_summary,
        help=_('System selected answers to give to students'),
    )
    display_name = String(
        display_name=_('Display Name'),
        help=_('This is the title for this question type'),
        default='Free-text Response',
        scope=Scope.settings,
    )
    fullcredit_keyphrases = List(
        display_name=_('Full-Credit Key Phrases'),
        help=_('This is a list of words or phrases, one of '
               'which must be present in order for the student\'s answer '
               'to receive full credit'),
        default=[],
        scope=Scope.settings,
    )
    halfcredit_keyphrases = List(
        display_name=_('Half-Credit Key Phrases'),
        help=_('This is a list of words or phrases, one of '
               'which must be present in order for the student\'s answer '
               'to receive half credit'),
        default=[],
        scope=Scope.settings,
    )
    # A value of 0 means "unlimited attempts" (see _can_submit).
    max_attempts = Integer(
        display_name=_('Maximum Number of Attempts'),
        help=_('This is the maximum number of times a '
               'student is allowed to attempt the problem'),
        default=0,
        values={'min': 1},
        scope=Scope.settings,
    )
    max_word_count = Integer(
        display_name=_('Maximum Word Count'),
        help=_('This is the maximum number of words allowed for this '
               'question'),
        default=10000,
        values={'min': 1},
        scope=Scope.settings,
    )
    min_word_count = Integer(
        display_name=_('Minimum Word Count'),
        help=_('This is the minimum number of words required '
               'for this question'),
        default=1,
        values={'min': 1},
        scope=Scope.settings,
    )
    prompt = String(
        display_name=_('Prompt'),
        help=_('This is the prompt students will see when '
               'asked to enter their response'),
        default='Please enter your response within this text area',
        scope=Scope.settings,
        multiline_editor=True,
    )
    submitted_message = String(
        display_name=_('Submission Received Message'),
        help=_('This is the message students will see upon '
               'submitting their response'),
        default='Your submission has been received',
        scope=Scope.settings,
    )
    # A weight of 0 hides the problem-progress display (see
    # _get_problem_progress).
    weight = Integer(
        display_name=_('Weight'),
        help=_('This assigns an integer value representing '
               'the weight of this problem'),
        default=0,
        values={'min': 1},
        scope=Scope.settings,
    )
    saved_message = String(
        display_name=_('Draft Received Message'),
        help=_('This is the message students will see upon '
               'submitting a draft response'),
        default=('Your answers have been saved but not graded. '
                 'Click "Submit" to grade them.'),
        scope=Scope.settings,
    )

    # Number of graded submissions made by this student.
    count_attempts = Integer(
        default=0,
        scope=Scope.user_state,
    )
    # Fraction of credit earned (Credit.zero/half/full value).
    score = Float(
        default=0.0,
        scope=Scope.user_state,
    )
    student_answer = String(
        default='',
        scope=Scope.user_state,
    )

    has_score = True

    editable_fields = (
        'display_name',
        'prompt',
        'weight',
        'max_attempts',
        'display_correctness',
        'min_word_count',
        'max_word_count',
        'fullcredit_keyphrases',
        'halfcredit_keyphrases',
        'submitted_message',
        'display_other_student_responses',
        'saved_message',
    )

    def build_fragment(
        self,
        rendered_template,
        initialize_js_func,
        additional_css=None,
        additional_js=None,
    ):
        #  pylint: disable=too-many-arguments
        """
        Creates a fragment for display.

        Args:
            rendered_template: HTML content of the fragment.
            initialize_js_func: name of the JS function to initialize.
            additional_css: optional list of CSS resource paths.
            additional_js: optional list of JS resource paths.

        Returns:
            (Fragment): the assembled fragment.
        """
        # 'None' sentinels instead of mutable [] defaults, which are
        # shared across calls.
        fragment = Fragment(rendered_template)
        for item in additional_css or []:
            url = self.runtime.local_resource_url(self, item)
            fragment.add_css_url(url)
        for item in additional_js or []:
            url = self.runtime.local_resource_url(self, item)
            fragment.add_javascript_url(url)
        fragment.initialize_js(initialize_js_func)
        return fragment

    # Decorate the view in order to support multiple devices e.g. mobile
    # See: https://openedx.atlassian.net/wiki/display/MA/Course+Blocks+API
    # section 'View @supports(multi_device) decorator'
    @XBlock.supports('multi_device')
    def student_view(self, context=None):
        """The main view of FreeTextResponse, displayed when viewing courses.

        The main view which displays the general layout for FreeTextResponse

        Args:
            context: optional dict of template context; copied, never
                mutated in place (the original code updated a shared
                mutable default argument).

        Returns:
            (Fragment): The HTML Fragment for this XBlock, which determines the
            general frame of the FreeTextResponse Question.
        """
        # Copy so neither the caller's dict nor a shared default is mutated.
        context = dict(context) if context else {}
        display_other_responses = self.display_other_student_responses
        self.runtime.service(self, 'i18n')
        context.update({
            'display_name':
            self.display_name,
            'indicator_class':
            self._get_indicator_class(),
            'nodisplay_class':
            self._get_nodisplay_class(),
            'problem_progress':
            self._get_problem_progress(),
            'prompt':
            self.prompt,
            'student_answer':
            self.student_answer,
            'is_past_due':
            self.is_past_due(),
            'used_attempts_feedback':
            self._get_used_attempts_feedback(),
            'visibility_class':
            self._get_indicator_visibility_class(),
            'word_count_message':
            self._get_word_count_message(),
            'display_other_responses':
            display_other_responses,
            'other_responses':
            self.get_other_answers(),
        })
        template = self.loader.render_django_template(
            'templates/freetextresponse_view.html',
            context=Context(context),
            i18n_service=self.runtime.service(self, 'i18n'),
        )
        fragment = self.build_fragment(
            template,
            initialize_js_func='FreeTextResponseView',
            additional_css=[
                'public/view.css',
            ],
            additional_js=[
                'public/view.js',
            ],
        )
        return fragment

    def max_score(self):
        """
        Returns the configured number of possible points for this component.
        Arguments:
            None
        Returns:
            float: The number of possible points for this component
        """
        return self.weight

    @classmethod
    def _generate_validation_message(cls, msg):
        """
        Helper method to generate a ValidationMessage from
        the supplied string
        """
        result = ValidationMessage(ValidationMessage.ERROR,
                                   ugettext(unicode(msg)))
        return result

    def validate_field_data(self, validation, data):
        """
        Validates settings entered by the instructor.

        Adds a ValidationMessage to `validation` for each violated
        constraint.
        """
        if data.weight < 0:
            msg = FreeTextResponse._generate_validation_message(
                'Weight Attempts cannot be negative')
            validation.add(msg)
        if data.max_attempts < 0:
            msg = FreeTextResponse._generate_validation_message(
                'Maximum Attempts cannot be negative')
            validation.add(msg)
        if data.min_word_count < 1:
            msg = FreeTextResponse._generate_validation_message(
                'Minimum Word Count cannot be less than 1')
            validation.add(msg)
        if data.min_word_count > data.max_word_count:
            msg = FreeTextResponse._generate_validation_message(
                'Minimum Word Count cannot be greater than Max Word Count')
            validation.add(msg)
        if not data.submitted_message:
            msg = FreeTextResponse._generate_validation_message(
                'Submission Received Message cannot be blank')
            validation.add(msg)

    def _get_indicator_visibility_class(self):
        """
        Returns the visibility class for the correctness indicator html element
        """
        if self.display_correctness:
            result = ''
        else:
            result = 'hidden'
        return result

    def _get_word_count_message(self):
        """
        Returns the word count message
        """
        result = ungettext(
            "Your response must be "
            "between {min} and {max} word.",
            "Your response must be "
            "between {min} and {max} words.",
            self.max_word_count,
        ).format(
            min=self.min_word_count,
            max=self.max_word_count,
        )
        return result

    def _get_invalid_word_count_message(self, ignore_attempts=False):
        """
        Returns the invalid word count message

        Only shown after the first attempt (or when `ignore_attempts`),
        so students are not warned before they have tried anything.
        """
        result = ''
        if ((ignore_attempts or self.count_attempts > 0)
                and (not self._word_count_valid())):
            word_count_message = self._get_word_count_message()
            result = ugettext(
                "Invalid Word Count. {word_count_message}").format(
                    word_count_message=word_count_message, )
        return result

    def _get_indicator_class(self):
        """
        Returns the class of the correctness indicator element
        """
        result = 'unanswered'
        if self.display_correctness and self._word_count_valid():
            if self._determine_credit() == Credit.zero:
                result = 'incorrect'
            else:
                result = 'correct'
        return result

    def _word_count_valid(self):
        """
        Returns a boolean value indicating whether the current
        word count of the user's answer is valid
        """
        word_count = len(self.student_answer.split())
        result = (word_count <= self.max_word_count
                  and word_count >= self.min_word_count)
        return result

    @classmethod
    def _is_at_least_one_phrase_present(cls, phrases, answer):
        """
        Determines if at least one of the supplied phrases is
        present in the given answer (case-insensitive substring match)
        """
        answer = answer.lower()
        matches = [phrase.lower() in answer for phrase in phrases]
        return any(matches)

    def _get_problem_progress(self):
        """
        Returns a statement of progress for the XBlock, which depends
        on the user's current score
        """
        if self.weight == 0:
            result = ''
        elif self.score == 0.0:
            result = "({})".format(
                ungettext(
                    "{weight} point possible",
                    "{weight} points possible",
                    self.weight,
                ).format(weight=self.weight, ))
        else:
            scaled_score = self.score * self.weight
            # No trailing zero and no scientific notation
            score_string = ('%.15f' % scaled_score).rstrip('0').rstrip('.')
            result = "({})".format(
                ungettext(
                    "{score_string}/{weight} point",
                    "{score_string}/{weight} points",
                    self.weight,
                ).format(
                    score_string=score_string,
                    weight=self.weight,
                ))
        return result

    def _compute_score(self):
        """
        Computes and publishes the user's score for the XBlock
        based on their answer
        """
        credit = self._determine_credit()
        self.score = credit.value
        try:
            self.runtime.publish(self, 'grade', {
                'value': self.score,
                'max_value': Credit.full.value
            })
        except IntegrityError:
            # A concurrent grade write already landed; keep the local score.
            pass

    def _determine_credit(self):
        #  Not a standard xlbock pylint disable.
        # This is a problem with pylint 'enums and R0204 in general'
        """
        Helper Method that determines the level of credit that
        the user should earn based on their answer

        Rules, in order: blank/invalid word count -> zero; no key phrases
        configured -> full; full-credit phrase present -> full; half-credit
        phrase present -> half; otherwise zero.
        """
        result = None
        if self.student_answer == '' or not self._word_count_valid():
            result = Credit.zero
        elif not self.fullcredit_keyphrases \
                and not self.halfcredit_keyphrases:
            result = Credit.full
        elif FreeTextResponse._is_at_least_one_phrase_present(
                self.fullcredit_keyphrases, self.student_answer):
            result = Credit.full
        elif FreeTextResponse._is_at_least_one_phrase_present(
                self.halfcredit_keyphrases, self.student_answer):
            result = Credit.half
        else:
            result = Credit.zero
        return result

    def _get_used_attempts_feedback(self):
        """
        Returns the text with feedback to the user about the number of attempts
        they have used if applicable
        """
        result = ''
        if self.max_attempts > 0:
            result = ungettext(
                'You have used {count_attempts} of {max_attempts} submission',
                'You have used {count_attempts} of {max_attempts} submissions',
                self.max_attempts,
            ).format(
                count_attempts=self.count_attempts,
                max_attempts=self.max_attempts,
            )
        return result

    def _get_nodisplay_class(self):
        """
        Returns the css class for the submit button
        """
        result = ''
        if self.max_attempts > 0 and self.count_attempts >= self.max_attempts:
            result = 'nodisplay'
        return result

    def _get_submitted_message(self):
        """
        Returns the message to display in the submission-received div
        """
        result = ''
        if self._word_count_valid():
            result = self.submitted_message
        return result

    def _get_user_alert(self, ignore_attempts=False):
        """
        Returns the message to display in the user_alert div
        depending on the student answer
        """
        result = ''
        if not self._word_count_valid():
            result = self._get_invalid_word_count_message(ignore_attempts)
        return result

    def _can_submit(self):
        """Whether the student may still submit (due date and attempts)."""
        if self.is_past_due():
            return False
        if self.max_attempts == 0:
            # 0 means unlimited attempts.
            return True
        if self.count_attempts < self.max_attempts:
            return True
        return False

    @XBlock.json_handler
    def submit(self, data, suffix=''):
        # pylint: disable=unused-argument
        """
        Processes the user's submission
        """
        # Fails if the UI submit/save buttons were shut
        # down on the previous submission
        if self._can_submit():
            self.student_answer = data['student_answer']
            # Counting the attempts and publishing a score
            # even if word count is invalid.
            self.count_attempts += 1
            self._compute_score()
            display_other_responses = self.display_other_student_responses
            if display_other_responses and data.get('can_record_response'):
                self.store_student_response()
        result = {
            'status': 'success',
            'problem_progress': self._get_problem_progress(),
            'indicator_class': self._get_indicator_class(),
            'used_attempts_feedback': self._get_used_attempts_feedback(),
            'nodisplay_class': self._get_nodisplay_class(),
            'submitted_message': self._get_submitted_message(),
            'user_alert': self._get_user_alert(ignore_attempts=True),
            'other_responses': self.get_other_answers(),
            'display_other_responses': self.display_other_student_responses,
            'visibility_class': self._get_indicator_visibility_class(),
        }
        return result

    # NOTE(review): the handler name is misspelled ("reponse") but is the
    # endpoint name the front-end JS posts to, so it cannot be renamed
    # without updating the client code in lockstep.
    @XBlock.json_handler
    def save_reponse(self, data, suffix=''):
        # pylint: disable=unused-argument
        """
        Processes the user's save (draft, ungraded)
        """
        # Fails if the UI submit/save buttons were shut
        # down on the previous submission
        if self.max_attempts == 0 or self.count_attempts < self.max_attempts:
            self.student_answer = data['student_answer']
        result = {
            'status': 'success',
            'problem_progress': self._get_problem_progress(),
            'used_attempts_feedback': self._get_used_attempts_feedback(),
            'nodisplay_class': self._get_nodisplay_class(),
            'submitted_message': '',
            'user_alert': self.saved_message,
            'visibility_class': self._get_indicator_visibility_class(),
        }
        return result

    def store_student_response(self):
        """
        Submit a student answer to the answer pool by appending the given
        answer to the end of the list.
        """
        # if the answer is wrong, do not display it
        if self.score != Credit.full.value:
            return

        student_id = self.get_student_id()
        # remove any previous answers the student submitted
        for index, response in enumerate(self.displayable_answers):
            if response['student_id'] == student_id:
                del self.displayable_answers[index]
                break

        self.displayable_answers.append({
            'student_id': student_id,
            'answer': self.student_answer,
        })

        # Want to store extra response so student can still see
        # MAX_RESPONSES answers if their answer is in the pool.
        response_index = -(MAX_RESPONSES + 1)
        self.displayable_answers = self.displayable_answers[response_index:]

    def get_other_answers(self):
        """
        Returns at most MAX_RESPONSES answers from the pool.

        Does not return answers the student had submitted.
        """
        student_id = self.get_student_id()
        display_other_responses = self.display_other_student_responses
        shouldnt_show_other_responses = not display_other_responses
        student_answer_incorrect = self._determine_credit() == Credit.zero
        if student_answer_incorrect or shouldnt_show_other_responses:
            return []
        return_list = [
            response for response in self.displayable_answers
            if response['student_id'] != student_id
        ]

        return_list = return_list[-(MAX_RESPONSES):]
        return return_list
コード例 #24
0
class VideoFields(object):
    """Field declarations shared by `VideoModule` and `VideoDescriptor`."""

    display_name = String(
        display_name="Display Name",
        help="Display name for this module.",
        scope=Scope.settings,
        default="Video",
    )
    # Per-user playback position so the video resumes where the user left off.
    saved_video_position = RelativeTime(
        help="Current position in the video",
        scope=Scope.user_state,
        default=datetime.timedelta(seconds=0),
    )
    # TODO: This should be moved to Scope.content, but this will
    # require data migration to support the old video module.
    youtube_id_1_0 = String(
        display_name="Youtube ID",
        help="This is the Youtube ID reference for the normal speed video.",
        scope=Scope.settings,
        default="OEoXaMPEzfM",
    )
    youtube_id_0_75 = String(
        display_name="Youtube ID for .75x speed",
        help="Optional, for older browsers: the Youtube ID for the .75x speed video.",
        scope=Scope.settings,
        default="",
    )
    youtube_id_1_25 = String(
        display_name="Youtube ID for 1.25x speed",
        help="Optional, for older browsers: the Youtube ID for the 1.25x speed video.",
        scope=Scope.settings,
        default="",
    )
    youtube_id_1_5 = String(
        display_name="Youtube ID for 1.5x speed",
        help="Optional, for older browsers: the Youtube ID for the 1.5x speed video.",
        scope=Scope.settings,
        default="",
    )
    # Both values are datetime.timedelta objects; the player front-end
    # checks that the (start_time, end_time) pair is logically valid.
    start_time = RelativeTime(
        display_name="Start Time",
        help="Start time for the video (HH:MM:SS). Max value is 23:59:59.",
        scope=Scope.settings,
        default=datetime.timedelta(seconds=0),
    )
    end_time = RelativeTime(
        display_name="End Time",
        help="End time for the video (HH:MM:SS). Max value is 23:59:59.",
        scope=Scope.settings,
        default=datetime.timedelta(seconds=0),
    )
    # `source` is a deprecated field and should not be used in future;
    # `download_video` is used instead.
    source = String(
        display_name="Download Video",
        help="The external URL to download the video.",
        scope=Scope.settings,
        default="",
    )
    download_video = Boolean(
        display_name="Video Download Allowed",
        help="Show a link beneath the video to allow students to download the video. Note: You must add at least one video source below.",
        scope=Scope.settings,
        default=False,
    )
    html5_sources = List(
        display_name="Video Sources",
        help="A list of filenames to be used with HTML5 video. The first supported filetype will be displayed.",
        scope=Scope.settings,
    )
    track = String(
        display_name="Download Transcript",
        help="The external URL to download the timed transcript track. This appears as a link beneath the video.",
        scope=Scope.settings,
        default='',
    )
    download_track = Boolean(
        display_name="Transcript Download Allowed",
        help="Show a link beneath the video to allow students to download the transcript. Note: You must add a link to the HTML5 Transcript field above.",
        scope=Scope.settings,
        default=False,
    )
    sub = String(
        display_name="Transcript (primary)",
        help="The name of the timed transcript track (for non-Youtube videos).",
        scope=Scope.settings,
        default="",
    )
    show_captions = Boolean(
        display_name="Transcript Display",
        help="This controls whether or not captions are shown by default.",
        scope=Scope.settings,
        default=True,
    )
    # Data format: {'de': 'german_translation', 'uk': 'ukrainian_translation'}
    transcripts = Dict(
        display_name="Transcript Translations",
        help="Add additional transcripts in other languages",
        scope=Scope.settings,
        default={},
    )
    transcript_language = String(
        display_name="Preferred language for transcript",
        help="Preferred language for transcript",
        scope=Scope.preferences,
        default="en",
    )
    transcript_download_format = String(
        help="Transcript file format to download by user.",
        scope=Scope.preferences,
        values=[
            {"display_name": "SubRip (.srt) file", "value": "srt"},
            {"display_name": "Text (.txt) file", "value": "txt"},
        ],
        default='srt',
    )
    # The last speed the user explicitly chose for this particular video.
    speed = Float(
        help="The last speed that was explicitly set by user for the video.",
        scope=Scope.user_state,
    )
    # Fallback speed used when no per-video speed has been set.
    global_speed = Float(
        help="Default speed in cases when speed wasn't explicitly for specific video",
        scope=Scope.preferences,
        default=1.0,
    )
コード例 #25
0
ファイル: lti_module.py プロジェクト: paulmedwal/edx-platform
class LTIFields(object):
    """
    Fields to define and obtain LTI tool from provider are set here,
    except credentials, which should be set in course settings::

    `lti_id` is id to connect tool with credentials in course settings. It should not contain :: (double colon)
    `launch_url` is launch URL of tool.
    `custom_parameters` are additional parameters to navigate to proper book and book page.

    For example, for Vitalsource provider, `launch_url` should be
    *https://bc-staging.vitalsource.com/books/book*,
    and to get to proper book and book page, you should set custom parameters as::

        vbid=put_book_id_here
        book_location=page/put_page_number_here

    Default non-empty URL for `launch_url` is needed due to oauthlib demand (URL scheme should be presented)::

    https://github.com/idan/oauthlib/blob/master/oauthlib/oauth1/rfc5849/signature.py#L136

    NOTE: the ``help`` strings below are wrapped in ``_()`` for gettext
    extraction; their literal structure must not be reformatted or the
    extracted message ids (and existing translations) would change.
    """
    # Component title shown to students; also used by analytics reports.
    display_name = String(
        display_name=_("Display Name"),
        help=_(
            "Enter the name that students see for this component.  "
            "Analytics reports may also use the display name to identify this component."
        ),
        scope=Scope.settings,
        default="LTI",
    )
    # Key linking this component to the credentials configured in the
    # course's "LTI Passports" advanced setting.
    lti_id = String(
        display_name=_("LTI ID"),
        help=_("Enter the LTI ID for the external LTI provider.  "
               "This value must be the same LTI ID that you entered in the "
               "LTI Passports setting on the Advanced Settings page."
               "<br />See " + DOCS_ANCHOR_TAG +
               " for more details on this setting."),
        default='',
        scope=Scope.settings)
    # Launch endpoint of the external tool; only used when hide_launch
    # is False. Default is a non-empty URL (see class docstring).
    launch_url = String(
        display_name=_("LTI URL"),
        help=_(
            "Enter the URL of the external tool that this component launches. "
            "This setting is only used when Hide External Tool is set to False."
            "<br />See " + DOCS_ANCHOR_TAG +
            " for more details on this setting."),
        default='http://www.example.com',
        scope=Scope.settings)
    # Extra key/value pairs forwarded to the tool at launch time
    # (e.g. vbid=..., book_location=... — see class docstring).
    custom_parameters = List(
        display_name=_("Custom Parameters"),
        help=
        _("Add the key/value pair for any custom parameters, such as the page your e-book should open to or "
          "the background color for this component."
          "<br />See " + DOCS_ANCHOR_TAG +
          " for more details on this setting."),
        scope=Scope.settings)
    # True: launch in a new window; False: embed in an IFrame.
    open_in_a_new_page = Boolean(
        display_name=_("Open in New Page"),
        help=
        _("Select True if you want students to click a link that opens the LTI tool in a new window. "
          "Select False if you want the LTI content to open in an IFrame in the current page. "
          "This setting is only used when Hide External Tool is set to False.  "
          ),
        default=True,
        scope=Scope.settings)
    # Whether the external LTI system sends back a numerical grade.
    has_score = Boolean(
        display_name=_("Scored"),
        help=
        _("Select True if this component will receive a numerical score from the external LTI system."
          ),
        default=False,
        scope=Scope.settings)
    # Maximum points for this component; only meaningful when has_score.
    weight = Float(
        display_name=_("Weight"),
        help=_("Enter the number of points possible for this component.  "
               "The default value is 1.0.  "
               "This setting is only used when Scored is set to True."),
        default=1.0,
        scope=Scope.settings,
        values={"min": 0},
    )
    # Per-user copy of the published score kept in the xblock KVS.
    module_score = Float(help=_(
        "The score kept in the xblock KVS -- duplicate of the published score in django DB"
    ),
                         default=None,
                         scope=Scope.user_state)
    # Grader feedback returned via the LTI 2.0 result service.
    score_comment = String(
        help=_("Comment as returned from grader, LTI2.0 spec"),
        default="",
        scope=Scope.user_state)
    # When True the component acts only as a grade-sync placeholder:
    # the Launch button and any IFrames are hidden.
    hide_launch = Boolean(
        display_name=_("Hide External Tool"),
        help=
        _("Select True if you want to use this component as a placeholder for syncing with an external grading  "
          "system rather than launch an external tool.  "
          "This setting hides the Launch button and any IFrames for this component."
          ),
        default=False,
        scope=Scope.settings)
コード例 #26
0
class RateXBlock(XBlock):
    """
    This is an XBlock -- eventually, hopefully an aside -- which
    allows you to rate content in the course. We've wanted this for a
    long time, but Dartmouth finally encourage me to start to build
    this.
    """
    # This is a list of prompts. If we have multiple elements in the
    # list, one will be chosen at random. This is currently not
    # exposed in the UX. If the prompt is missing any portions, we
    # will default to the ones in get_prompt().
    prompts = List(default=[{
        'freeform':
        "Please provide us feedback on this section",
        'likert':
        "Please rate your overall experience with this section"
    }],
                   scope=Scope.settings,
                   help="Freeform user prompt",
                   xml_node=True)

    prompt_choice = Integer(
        default=-1,
        scope=Scope.user_state,
        help="Random number generated for p. -1 if uninitialized")

    user_vote = Integer(default=-1,
                        scope=Scope.user_state,
                        help="How user voted. -1 if didn't vote")

    p = Float(default=100,
              scope=Scope.settings,
              help="What percent of the time should this show?")

    p_user = Float(default=-1,
                   scope=Scope.user_state,
                   help="Random number generated for p. -1 if uninitialized")

    vote_aggregate = List(default=None,
                          scope=Scope.user_state_summary,
                          help="A list of user votes")

    user_freeform = String(default="", scope=Scope.user_state, help="Feedback")

    # BUG FIX: this keyword was previously misspelled ``scopde``, which
    # the XBlock Field constructor silently accepted via **kwargs,
    # leaving the field in the default scope instead of Scope.settings.
    display_name = String(display_name="Display Name",
                          default="Provide Feedback",
                          scope=Scope.settings)

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def get_prompt(self, index=-1):
        """
        Return the current prompt dictionary, doing appropriate
        randomization if necessary, and falling back to defaults when
        necessary.
        """
        if index == -1:
            index = self.prompt_choice

        _ = self.runtime.service(self, 'i18n').ugettext
        # Default prompt; any keys present in the configured prompt
        # override these.
        prompt = {
            'freeform':
            _("Please provide us feedback on this section."),
            'likert':
            _("Please rate your overall experience "
              "with this section."),
            'mouseovers':
            [_("Excellent"),
             _("Good"),
             _("Average"),
             _("Fair"),
             _("Poor")],
            'icons': ["😁", "😊", "😐", "😞", "😭"]
        }

        prompt.update(self.prompts[index])
        return prompt

    def student_view(self, context=None):
        """
        The primary view of the RateXBlock, shown to students
        when viewing courses.
        """
        # Figure out which prompt we show. We set self.prompt_choice to
        # the index of the prompt. We set it if it is out of range (either
        # uninitiailized, or incorrect due to changing list length). Then,
        # we grab the prompt, prepopulated with defaults.
        if self.prompt_choice < 0 or self.prompt_choice >= len(self.prompts):
            self.prompt_choice = random.randint(0, len(self.prompts) - 1)
        prompt = self.get_prompt()

        # Now, we render the RateXBlock. This may be redundant, since we
        # don't always show it.
        html = self.resource_string("static/html/rate.html")
        # Staff see aggregate counts per scale item; students see the
        # plain scale. The replace allows us to format the HTML nicely
        # without getting extra whitespace.
        if self.vote_aggregate and self.is_staff():
            scale_item = self.resource_string("static/html/staff_item.html")
        else:
            scale_item = self.resource_string("static/html/scale_item.html")
        scale_item = scale_item.replace('\n', '')
        indexes = list(range(len(prompt['icons'])))
        active_vote = [
            "checked" if i == self.user_vote else "" for i in indexes
        ]
        self.init_vote_aggregate()
        votes = self.vote_aggregate
        scale = "".join(
            scale_item.format(level=level, icon=icon, i=i, active=active,
                              votes=vote_count)
            for (level, icon, i, active,
                 vote_count) in zip(prompt['mouseovers'], prompt['icons'],
                                    indexes, active_vote, votes))
        if self.user_vote != -1:
            _ = self.runtime.service(self, 'i18n').ugettext
            response = _("Thank you for voting!")
        else:
            response = ""
        rendered = html.format(self=self,
                               scale=scale,
                               freeform_prompt=prompt['freeform'],
                               likert_prompt=prompt['likert'],
                               response=response)

        # We initialize self.p_user if not initialized -- this sets whether
        # or not we show it. From there, if it is less than odds of showing,
        # we set the fragment to the rendered XBlock. Otherwise, we return
        # empty HTML. There ought to be a way to return None, but XBlocks
        # doesn't support that.
        if self.p_user == -1:
            self.p_user = random.uniform(0, 100)
        if self.p_user < self.p:
            frag = Fragment(rendered)
        else:
            frag = Fragment("")

        # Finally, we do the standard JS+CSS boilerplate. Honestly, XBlocks
        # ought to have a sane default here.
        frag.add_css(self.resource_string("static/css/rate.css"))
        frag.add_javascript(self.resource_string("static/js/src/rate.js"))
        frag.initialize_js('RateXBlock')
        return frag

    def studio_view(self, context):
        """
        Create a fragment used to display the edit view in the Studio.
        """
        html_str = self.resource_string("static/html/studio_view.html")
        prompt = self.get_prompt(0)
        frag = Fragment(str(html_str).format(**prompt))
        js_str = self.resource_string("static/js/src/studio.js")
        frag.add_javascript(str(js_str))
        frag.initialize_js('RateBlock')
        return frag

    @XBlock.json_handler
    def studio_submit(self, data, suffix=''):
        """
        Called when submitting the form in Studio.

        Only the first prompt is editable from Studio.
        """
        self.prompts[0]['freeform'] = data.get('freeform')
        self.prompts[0]['likert'] = data.get('likert')
        return {'result': 'success'}

    def init_vote_aggregate(self):
        """Initialize the shared vote counters to zero if unset."""
        if not self.vote_aggregate:
            self.vote_aggregate = [0] * (len(self.get_prompt()['mouseovers']))

    def vote(self, data):
        """
        Handle voting: record the user's vote and update the aggregate,
        removing any previous vote by the same user first.
        """
        # Make sure we're initialized
        self.init_vote_aggregate()

        # Remove old vote if we voted before
        if self.user_vote != -1:
            self.vote_aggregate[self.user_vote] -= 1

        self.user_vote = data['vote']
        self.vote_aggregate[self.user_vote] += 1

    @XBlock.json_handler
    def feedback(self, data, suffix=''):
        '''
        Allow students to submit feedback, both numerical and
        qualitative. We only update the specific type of feedback
        submitted.

        We return the current state. While this is not used by the
        client code, it is helpful for testing. For staff users, we
        also return the aggregate results.
        '''
        _ = self.runtime.service(self, 'i18n').ugettext

        if 'freeform' not in data and 'vote' not in data:
            response = {"success": False, "response": _("Please vote!")}
            self.runtime.publish(self, 'edx.ratexblock.nothing_provided', {})
        if 'freeform' in data:
            response = {
                "success": True,
                "response": _("Thank you for your feedback!")
            }
            self.runtime.publish(
                self, 'edx.ratexblock.freeform_provided', {
                    'old_freeform': self.user_freeform,
                    'new_freeform': data['freeform']
                })
            self.user_freeform = data['freeform']
        if 'vote' in data:
            response = {
                "success": True,
                "response": _("Thank you for voting!")
            }
            self.runtime.publish(self, 'edx.ratexblock.likert_provided', {
                'old_vote': self.user_vote,
                'new_vote': data['vote']
            })
            self.vote(data)

        response.update({
            "freeform": self.user_freeform,
            "vote": self.user_vote
        })

        if self.is_staff():
            response['aggregate'] = self.vote_aggregate

        return response

    # TO-DO: change this to create the scenarios you'd like to see in the
    # workbench while developing your XBlock.
    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("RateXBlock", """<vertical_demo>
                <rate p="50"/>
                <rate p="50"/>
                <rate p="50"/>
                </vertical_demo>
             """),
        ]

    def is_staff(self):
        """
        Return self.xmodule_runtime.user_is_staff if available

        This is not a supported part of the XBlocks API in all
        runtimes, and this is a workaround so something reasonable
        happens in both workbench and edx-platform
        """
        if hasattr(self, "xmodule_runtime") and \
           hasattr(self.xmodule_runtime, "user_is_staff"):
            return self.xmodule_runtime.user_is_staff
        else:
            # In workbench and similar settings, always return true
            return True
コード例 #27
0
class ScormXBlock(XBlock):
    """
    When a user uploads a Scorm package, the zip file is stored in:

        media/{org}/{course}/{block_type}/{block_id}/{sha1}{ext}

    This zip file is then extracted to the media/{scorm_location}/{block_id}.

    The scorm location is defined by the LOCATION xblock setting. If undefined, this is
    "scorm". This setting can be set e.g:

        XBLOCK_SETTINGS["ScormXBlock"] = {
            "LOCATION": "alternatevalue",
        }

    Note that neither the folder nor the package file are deleted when the
    xblock is removed.
    """

    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        default="Scorm module",
        scope=Scope.settings,
    )
    index_page_path = String(
        display_name=_("Path to the index page in scorm file"),
        scope=Scope.settings)
    package_meta = Dict(scope=Scope.content)
    scorm_version = String(default="SCORM_12", scope=Scope.settings)

    # save completion_status for SCORM_2004
    lesson_status = String(scope=Scope.user_state, default="not attempted")
    success_status = String(scope=Scope.user_state, default="unknown")
    lesson_score = Float(scope=Scope.user_state, default=0)
    weight = Float(
        default=1,
        display_name=_("Weight"),
        help=_("Weight/Maximum grade"),
        scope=Scope.settings,
    )
    has_score = Boolean(
        display_name=_("Scored"),
        help=
        _("Select False if this component will not receive a numerical score from the Scorm"
          ),
        default=True,
        scope=Scope.settings,
    )

    # See the Scorm data model:
    # https://scorm.com/scorm-explained/technical-scorm/run-time/
    scorm_data = Dict(scope=Scope.user_state, default={})

    icon_class = String(default="video", scope=Scope.settings)
    width = Integer(
        display_name=_("Display width (px)"),
        help=_("Width of iframe (default: 100%)"),
        scope=Scope.settings,
    )
    height = Integer(
        display_name=_("Display height (px)"),
        help=_("Height of iframe"),
        default=450,
        scope=Scope.settings,
    )

    has_author_view = True

    def render_template(self, template_path, context):
        """Render a packaged Django template with the given context."""
        template_str = self.resource_string(template_path)
        template = Template(template_str)
        return template.render(Context(context))

    @staticmethod
    def resource_string(path):
        """Handy helper for getting static resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def author_view(self, context=None):
        """Studio preview: same as student view, plus a hint when empty."""
        context = context or {}
        if not self.index_page_path:
            context[
                "message"] = "Click 'Edit' to modify this module and upload a new SCORM package."
        return self.student_view(context=context)

    def student_view(self, context=None):
        """Render the SCORM player iframe for the student."""
        logger.info("student_view index_page_url %s", self.index_page_url)
        student_context = {
            "index_page_url": self.index_page_url,
            "completion_status": self.get_completion_status(),
            "grade": self.get_grade(),
            "scorm_xblock": self,
        }
        student_context.update(context or {})
        template = self.render_template("static/html/scormxblock.html",
                                        student_context)
        frag = Fragment(template)
        frag.add_css(self.resource_string("static/css/scormxblock.css"))
        frag.add_javascript(
            self.resource_string("static/js/src/scormxblock.js"))
        frag.initialize_js("ScormXBlock",
                           json_args={"scorm_version": self.scorm_version})
        return frag

    def studio_view(self, context=None):
        """Render the Studio edit form."""
        # Note that we cannot use xblockutils's StudioEditableXBlockMixin because we
        # need to support package file uploads.
        studio_context = {
            "field_display_name": self.fields["display_name"],
            "field_has_score": self.fields["has_score"],
            "field_weight": self.fields["weight"],
            "field_width": self.fields["width"],
            "field_height": self.fields["height"],
            "scorm_xblock": self,
        }
        studio_context.update(context or {})
        template = self.render_template("static/html/studio.html",
                                        studio_context)
        frag = Fragment(template)
        frag.add_css(self.resource_string("static/css/scormxblock.css"))
        frag.add_javascript(self.resource_string("static/js/src/studio.js"))
        frag.initialize_js("ScormStudioXBlock")
        return frag

    @staticmethod
    def json_response(data):
        """Wrap ``data`` in a JSON webob Response."""
        return Response(json.dumps(data),
                        content_type="application/json",
                        charset="utf8")

    @XBlock.handler
    def studio_submit(self, request, _suffix):
        """
        Save the Studio form fields and, if a package file was uploaded,
        store it and extract its contents into media storage.
        """
        self.display_name = request.params["display_name"]
        self.width = request.params["width"]
        self.height = request.params["height"]
        self.has_score = request.params["has_score"]
        self.weight = request.params["weight"]
        # Form values arrive as strings, hence the "True" comparison.
        self.icon_class = "problem" if self.has_score == "True" else "video"

        response = {"result": "success", "errors": []}
        if not hasattr(request.params["file"], "file"):
            # File not uploaded
            return self.json_response(response)

        package_file = request.params["file"].file
        self.update_package_meta(package_file)

        # First, save scorm file in the storage for mobile clients
        if default_storage.exists(self.package_path):
            logger.info('Removing previously uploaded "%s"', self.package_path)
            default_storage.delete(self.package_path)
        default_storage.save(self.package_path, File(package_file))
        logger.info('Scorm "%s" file stored at "%s"', package_file,
                    self.package_path)

        # Then, extract zip file
        if default_storage.exists(self.extract_folder_base_path):
            logger.info('Removing previously unzipped "%s"',
                        self.extract_folder_base_path)
            recursive_delete(self.extract_folder_base_path)

        # Register the .js mimetype explicitly before saving: after other
        # Studio uploads, mimetypes.guess_type may start returning bytes
        # (b'text/javascript') for .js files, so we pin the correct value.
        # BUG FIX: the stdlib function is mimetypes.add_type (singular);
        # the previous add_types call raised AttributeError at upload time.
        mimetypes.add_type('text/javascript', '.js')

        with zipfile.ZipFile(package_file, "r") as scorm_zipfile:
            # BUG FIX: use a self-cleaning temporary directory; the previous
            # bare mkdtemp() call leaked a directory on every upload.
            with tempfile.TemporaryDirectory() as tmp_dir:
                for zipinfo in scorm_zipfile.infolist():
                    # Do not unzip folders, only files. In Python 3.6 we will
                    # have access to the is_dir() method to verify whether a
                    # ZipInfo object points to a directory.
                    # https://docs.python.org/3.6/library/zipfile.html#zipfile.ZipInfo.is_dir
                    if not zipinfo.filename.endswith("/"):
                        # Manually extract the file to avoid
                        # UnsupportedOperation seek
                        tmp_file = scorm_zipfile.extract(zipinfo, tmp_dir)
                        logger.info("Extracting SCORM file %s",
                                    zipinfo.filename)
                        # BUG FIX: close the extracted file handle instead of
                        # leaking it after default_storage.save().
                        with open(tmp_file, "rb") as extracted_file:
                            default_storage.save(
                                os.path.join(self.extract_folder_path,
                                             zipinfo.filename),
                                extracted_file)
                        # Remove as we go to keep temp disk usage minimal.
                        os.remove(tmp_file)
        try:
            self.update_package_fields()
        except ScormError as e:
            response["errors"].append(e.args[0])

        return self.json_response(response)

    @property
    def index_page_url(self):
        """URL of the package's entry page, or "" when nothing is uploaded."""
        if not self.package_meta or not self.index_page_path:
            logger.info("index_page_url index_page_url is blank")
            return ""
        folder = self.extract_folder_path
        if default_storage.exists(
                os.path.join(self.extract_folder_base_path,
                             self.index_page_path)):
            # For backward-compatibility, we must handle the case when the xblock data
            # is stored in the base folder.
            folder = self.extract_folder_base_path
            logger.warning("Serving SCORM content from old-style path: %s",
                           folder)
        result = default_storage.url(os.path.join(folder,
                                                  self.index_page_path))
        logger.info("index_page_url index_page_url = %s", result)
        return result

    @property
    def package_path(self):
        """
        Get file path of storage.
        """
        return (
            "{loc.org}/{loc.course}/{loc.block_type}/{loc.block_id}/{sha1}{ext}"
        ).format(
            loc=self.location,
            sha1=self.package_meta["sha1"],
            ext=os.path.splitext(self.package_meta["name"])[1],
        )

    @property
    def extract_folder_path(self):
        """
        This path needs to depend on the content of the scorm package. Otherwise,
        served media files might become stale when the package is updated.
        """
        return os.path.join(self.extract_folder_base_path,
                            self.package_meta["sha1"])

    @property
    def extract_folder_base_path(self):
        """
        Path to the folder where packages will be extracted.
        """
        return os.path.join(self.scorm_location(), self.location.block_id)

    @XBlock.json_handler
    def scorm_get_value(self, data, _suffix):
        """Return a SCORM data-model value requested by the player JS."""
        name = data.get("name")
        if name in ["cmi.core.lesson_status", "cmi.completion_status"]:
            return {"value": self.lesson_status}
        if name == "cmi.success_status":
            return {"value": self.success_status}
        if name in ["cmi.core.score.raw", "cmi.score.raw"]:
            # lesson_score is stored normalized to [0, 1]; SCORM raw
            # scores are on a 0-100 scale.
            return {"value": self.lesson_score * 100}
        return {"value": self.scorm_data.get(name, "")}

    @XBlock.json_handler
    def scorm_set_value(self, data, _suffix):
        """Store a SCORM data-model value and publish grades when needed."""
        context = {"result": "success"}
        name = data.get("name")

        if name in ["cmi.core.lesson_status", "cmi.completion_status"]:
            self.lesson_status = data.get("value")
            if self.has_score and data.get("value") in [
                    "completed",
                    "failed",
                    "passed",
            ]:
                self.publish_grade()
                context.update({"lesson_score": self.lesson_score})
        elif name == "cmi.success_status":
            self.success_status = data.get("value")
            if self.has_score:
                if self.success_status == "unknown":
                    self.lesson_score = 0
                self.publish_grade()
                context.update({"lesson_score": self.lesson_score})
        elif name in ["cmi.core.score.raw", "cmi.score.raw"
                      ] and self.has_score:
            # Normalize the 0-100 raw score to [0, 1] for storage.
            self.lesson_score = float(data.get("value", 0)) / 100.0
            self.publish_grade()
            context.update({"lesson_score": self.lesson_score})
        else:
            self.scorm_data[name] = data.get("value", "")

        context.update({"completion_status": self.get_completion_status()})
        return context

    def publish_grade(self):
        """Publish the current grade to the runtime."""
        self.runtime.publish(
            self,
            "grade",
            {
                "value": self.get_grade(),
                "max_value": self.weight
            },
        )

    def get_grade(self):
        """Return the weighted grade; failed attempts score zero."""
        lesson_score = self.lesson_score
        if self.lesson_status == "failed" or (
                self.scorm_version == "SCORM_2004"
                and self.success_status in ["failed", "unknown"]):
            lesson_score = 0
        return lesson_score * self.weight

    def set_score(self, score):
        """
        Utility method used to rescore a problem.
        """
        self.lesson_score = score.raw_earned / self.weight

    def max_score(self):
        """
        Return the maximum score possible.
        """
        return self.weight if self.has_score else None

    def update_package_meta(self, package_file):
        """Record sha1, name, timestamp, and size of the uploaded package."""
        self.package_meta["sha1"] = self.get_sha1(package_file)
        self.package_meta["name"] = package_file.name
        self.package_meta["last_updated"] = timezone.now().strftime(
            DateTime.DATETIME_FORMAT)
        # seek(0, 2) returns the end-of-file offset, i.e. the file size.
        self.package_meta["size"] = package_file.seek(0, 2)
        package_file.seek(0)

    def update_package_fields(self):
        """
        Update version and index page path fields.

        Raises ScormError when imsmanifest.xml is missing from the package.
        """
        self.index_page_path = ""
        imsmanifest_path = os.path.join(self.extract_folder_path,
                                        "imsmanifest.xml")
        try:
            imsmanifest_file = default_storage.open(imsmanifest_path)
        except IOError:
            raise ScormError(
                "Invalid package: could not find 'imsmanifest.xml' file at the root of the zip file"
            )
        else:
            tree = ET.parse(imsmanifest_file)
            imsmanifest_file.seek(0)
            self.index_page_path = "index.html"
            # Discover the default XML namespace so that find() queries work
            # on namespaced manifests.
            namespace = ""
            for _, node in ET.iterparse(imsmanifest_file, events=["start-ns"]):
                if node[0] == "":
                    namespace = node[1]
                    break
            root = tree.getroot()

            if namespace:
                resource = root.find(
                    "{{{0}}}resources/{{{0}}}resource".format(namespace))
                schemaversion = root.find(
                    "{{{0}}}metadata/{{{0}}}schemaversion".format(namespace))
            else:
                resource = root.find("resources/resource")
                schemaversion = root.find("metadata/schemaversion")

            # BUG FIX: compare against None. An Element with no children is
            # falsy, so ``if resource:`` skipped valid childless <resource>
            # elements that still carry an href attribute.
            if resource is not None:
                self.index_page_path = resource.get("href")
            # BUG FIX: escape the dot so that e.g. "1x2" is not treated as
            # schema version 1.2.
            if (schemaversion is not None) and (re.match(
                    r"^1\.2$", schemaversion.text) is None):
                self.scorm_version = "SCORM_2004"
            else:
                self.scorm_version = "SCORM_12"

    def get_completion_status(self):
        """Return the completion status, preferring SCORM 2004 success data."""
        completion_status = self.lesson_status
        if self.scorm_version == "SCORM_2004" and self.success_status != "unknown":
            completion_status = self.success_status
        return completion_status

    def scorm_location(self):
        """
        Unzipped files will be stored in a media folder with this name, and thus
        accessible at a url with that also includes this name.
        """
        default_scorm_location = "scorm"
        settings_service = self.runtime.service(self, "settings")
        if not settings_service:
            return default_scorm_location
        xblock_settings = settings_service.get_settings_bucket(self)
        return xblock_settings.get("LOCATION", default_scorm_location)

    @staticmethod
    def get_sha1(file_descriptor):
        """
        Get file hex digest (fingerprint).
        """
        block_size = 8 * 1024
        sha1 = hashlib.sha1()
        while True:
            block = file_descriptor.read(block_size)
            if not block:
                break
            sha1.update(block)
        # Rewind so callers can keep using the descriptor.
        file_descriptor.seek(0)
        return sha1.hexdigest()

    def student_view_data(self):
        """
        Inform REST api clients about original file location and it's "freshness".
        Make sure to include `student_view_data=openedxscorm` to URL params in the request.
        """
        if self.index_page_url:
            return {
                "last_modified": self.package_meta.get("last_updated", ""),
                "scorm_data": default_storage.url(self.package_path),
                "size": self.package_meta.get("size", 0),
                "index_page": self.index_page_path,
            }
        return {}

    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            (
                "ScormXBlock",
                """<vertical_demo>
                <openedxscorm/>
                </vertical_demo>
             """,
            ),
        ]
コード例 #28
0
class SubmitAndCompareModelMixin(object):
    """
    Handle data access logic for the XBlock.

    Declares every persisted field (content, settings and per-user state)
    used by the Submit and Compare XBlock.
    """

    # Marks the block as graded so the runtime requests a score from it.
    has_score = True
    display_name = String(
        display_name='Display Name',
        default='Submit and Compare',
        scope=Scope.settings,
        help=('This name appears in the horizontal'
              ' navigation at the top of the page'),
    )
    # Free-text answer entered by the student (per-user state).
    student_answer = String(
        default='',
        scope=Scope.user_state,
        help='This is the student\'s answer to the question',
    )
    # Maximum number of submission attempts allowed.
    # NOTE(review): 0 presumably means "unlimited" — confirm with view logic.
    max_attempts = Integer(
        default=0,
        scope=Scope.settings,
    )
    # Number of attempts the student has used so far.
    count_attempts = Integer(
        default=0,
        scope=Scope.user_state,
    )
    your_answer_label = String(
        default='Your Answer:',
        scope=Scope.settings,
        help='Label for the text area containing the student\'s answer',
    )
    our_answer_label = String(
        default='Our Answer:',
        scope=Scope.settings,
        help='Label for the \'expert\' answer',
    )
    submit_button_label = String(
        default='Submit and Compare',
        scope=Scope.settings,
        help='Label for the submit button',
    )
    hints = List(
        default=[],
        scope=Scope.content,
        help='Hints for the question',
    )
    # XML document holding the question body, the expert explanation shown
    # after submission, and the on-demand hints.
    question_string = String(help='Default question content ',
                             scope=Scope.content,
                             multiline_editor=True,
                             default=textwrap.dedent("""
            <submit_and_compare schema_version='1'>
                <body>
                    <p>
                        Before you begin the simulation,
                        think for a minute about your hypothesis.
                        What do you expect the outcome of the simulation
                        will be?  What data do you need to gather in order
                        to prove or disprove your hypothesis?
                    </p>
                </body>
                <explanation>
                    <p>
                        We would expect the simulation to show that
                        there is no difference between the two scenarios.
                        Relevant data to gather would include time and
                        temperature.
                    </p>
                </explanation>
                <demandhint>
                    <hint>
                        A hypothesis is a proposed explanation for a
                        phenomenon. In this case, the hypothesis is what
                        we think the simulation will show.
                    </hint>
                    <hint>
                        Once you've decided on your hypothesis, which data
                        would help you determine if that hypothesis is
                        correct or incorrect?
                    </hint>
                </demandhint>
            </submit_and_compare>
        """))
    # Score earned by the student, stored per user.
    score = Float(
        default=0.0,
        scope=Scope.user_state,
    )
    weight = Integer(
        display_name='Weight',
        help='This assigns an integer value representing '
        'the weight of this problem',
        default=0,
        scope=Scope.settings,
    )

    def max_score(self):
        """
        Return the configured number of possible points for this component.

        Returns:
            int: the value of the ``weight`` field (0 by default).
        """
        return self.weight
コード例 #29
0
class VideoFields(object):
    """Fields for `VideoModule` and `VideoDescriptor`."""
    display_name = String(
        help=_("The name students see. This name appears in the course ribbon and as a header for the video."),
        display_name=_("Component Display Name"),
        default="Video",
        scope=Scope.settings
    )

    # Per-student playback position, restored when the student returns.
    saved_video_position = RelativeTime(
        help=_("Current position in the video."),
        scope=Scope.user_state,
        default=datetime.timedelta(seconds=0)
    )
    # TODO: This should be moved to Scope.content, but this will
    # require data migration to support the old video module.
    youtube_id_1_0 = String(
        help=_("Optional, for older browsers: the YouTube ID for the normal speed video."),
        display_name=_("YouTube ID"),
        scope=Scope.settings,
        default="3_yD_cEKoCk"
    )
    # Speed-specific YouTube IDs are legacy fallbacks for browsers that
    # cannot vary playback speed client-side.
    youtube_id_0_75 = String(
        help=_("Optional, for older browsers: the YouTube ID for the .75x speed video."),
        display_name=_("YouTube ID for .75x speed"),
        scope=Scope.settings,
        default=""
    )
    youtube_id_1_25 = String(
        help=_("Optional, for older browsers: the YouTube ID for the 1.25x speed video."),
        display_name=_("YouTube ID for 1.25x speed"),
        scope=Scope.settings,
        default=""
    )
    youtube_id_1_5 = String(
        help=_("Optional, for older browsers: the YouTube ID for the 1.5x speed video."),
        display_name=_("YouTube ID for 1.5x speed"),
        scope=Scope.settings,
        default=""
    )
    start_time = RelativeTime(  # datetime.timedelta object
        help=_(
            "Time you want the video to start if you don't want the entire video to play. "
            "Not supported in the native mobile app: the full video file will play. "
            "Formatted as HH:MM:SS. The maximum value is 23:59:59."
        ),
        display_name=_("Video Start Time"),
        scope=Scope.settings,
        default=datetime.timedelta(seconds=0)
    )
    end_time = RelativeTime(  # datetime.timedelta object
        help=_(
            "Time you want the video to stop if you don't want the entire video to play. "
            "Not supported in the native mobile app: the full video file will play. "
            "Formatted as HH:MM:SS. The maximum value is 23:59:59."
        ),
        display_name=_("Video Stop Time"),
        scope=Scope.settings,
        default=datetime.timedelta(seconds=0)
    )
    #front-end code of video player checks logical validity of (start_time, end_time) pair.

    # `source` is deprecated field and should not be used in future.
    # `download_video` is used instead.
    source = String(
        help=_("The external URL to download the video."),
        display_name=_("Download Video"),
        scope=Scope.settings,
        default=""
    )
    download_video = Boolean(
        help=_("Allow students to download versions of this video in different formats if they cannot use the edX video player or do not have access to YouTube. You must add at least one non-YouTube URL in the Video File URLs field."),  # pylint: disable=line-too-long
        display_name=_("Video Download Allowed"),
        scope=Scope.settings,
        default=False
    )
    html5_sources = List(
        help=_("The URL or URLs where you've posted non-YouTube versions of the video. Each URL must end in .mpeg, .mp4, .ogg, or .webm and cannot be a YouTube URL. (For browser compatibility, we strongly recommend .mp4 and .webm format.) Students will be able to view the first listed video that's compatible with the student's computer. To allow students to download these videos, set Video Download Allowed to True."),  # pylint: disable=line-too-long
        display_name=_("Video File URLs"),
        scope=Scope.settings,
    )
    # Transcript-related settings.
    track = String(
        help=_("By default, students can download an .srt or .txt transcript when you set Download Transcript Allowed to True. If you want to provide a downloadable transcript in a different format, we recommend that you upload a handout by using the Upload a Handout field. If this isn't possible, you can post a transcript file on the Files & Uploads page or on the Internet, and then add the URL for the transcript here. Students see a link to download that transcript below the video."),  # pylint: disable=line-too-long
        display_name=_("Downloadable Transcript URL"),
        scope=Scope.settings,
        default=''
    )
    download_track = Boolean(
        help=_("Allow students to download the timed transcript. A link to download the file appears below the video. By default, the transcript is an .srt or .txt file. If you want to provide the transcript for download in a different format, upload a file by using the Upload Handout field."),  # pylint: disable=line-too-long
        display_name=_("Download Transcript Allowed"),
        scope=Scope.settings,
        default=False
    )
    sub = String(
        help=_("The default transcript for the video, from the Default Timed Transcript field on the Basic tab. This transcript should be in English. You don't have to change this setting."),  # pylint: disable=line-too-long
        display_name=_("Default Timed Transcript"),
        scope=Scope.settings,
        default=""
    )
    show_captions = Boolean(
        help=_("Specify whether the transcripts appear with the video by default."),
        display_name=_("Show Transcript"),
        scope=Scope.settings,
        default=True
    )
    # Data format: {'de': 'german_translation', 'uk': 'ukrainian_translation'}
    transcripts = Dict(
        help=_("Add transcripts in different languages. Click below to specify a language and upload an .srt transcript file for that language."),  # pylint: disable=line-too-long
        display_name=_("Transcript Languages"),
        scope=Scope.settings,
        default={}
    )
    # Per-user preferences (shared across all video blocks for a user).
    transcript_language = String(
        help=_("Preferred language for transcript."),
        display_name=_("Preferred language for transcript"),
        scope=Scope.preferences,
        default="en"
    )
    transcript_download_format = String(
        help=_("Transcript file format to download by user."),
        scope=Scope.preferences,
        values=[
            # Translators: This is a type of file used for captioning in the video player.
            {"display_name": _("SubRip (.srt) file"), "value": "srt"},
            {"display_name": _("Text (.txt) file"), "value": "txt"}
        ],
        default='srt',
    )
    # `speed` is per-block user state; `global_speed` is the cross-block preference.
    speed = Float(
        help=_("The last speed that the user specified for the video."),
        scope=Scope.user_state
    )
    global_speed = Float(
        help=_("The default speed for the video."),
        scope=Scope.preferences,
        default=1.0
    )
    youtube_is_available = Boolean(
        help=_("Specify whether YouTube is available for the user."),
        scope=Scope.user_info,
        default=True
    )
    handout = String(
        help=_("Upload a handout to accompany this video. Students can download the handout by clicking Download Handout under the video."),  # pylint: disable=line-too-long
        display_name=_("Upload Handout"),
        scope=Scope.settings,
    )
    only_on_web = Boolean(
        help=_(
            "Specify whether access to this video is limited to browsers only, or if it can be "
            "accessed from other applications including mobile apps."
        ),
        display_name=_("Video Available on Web Only"),
        scope=Scope.settings,
        default=False
    )
    edx_video_id = String(
        help=_("If you were assigned a Video ID by edX for the video to play in this component, enter the ID here. In this case, do not enter values in the Default Video URL, the Video File URLs, and the YouTube ID fields. If you were not assigned a Video ID, enter values in those other fields and ignore this field."),  # pylint: disable=line-too-long
        display_name=_("Video ID"),
        scope=Scope.settings,
        default="",
    )
    # Pre-roll ("bumper") video bookkeeping, stored as user preferences.
    bumper_last_view_date = DateTime(
        display_name=_("Date of the last view of the bumper"),
        scope=Scope.preferences,
    )
    bumper_do_not_show_again = Boolean(
        display_name=_("Do not show bumper again"),
        scope=Scope.preferences,
        default=False,
    )
コード例 #30
0
class MentoringBlock(BaseMentoringBlock, StudioContainerXBlockMixin,
                     StepParentMixin):
    """
    An XBlock providing mentoring capabilities

    Composed of text, answers input fields, and a set of MRQ/MCQ with advices.
    A set of conditions on the provided answers and MCQ/MRQ choices will determine if the
    student is a) provided mentoring advices and asked to alter his answer, or b) is given the
    ok to continue.
    """
    # Content
    MENTORING_MODES = ('standard', 'assessment')
    mode = String(display_name=_("Mode"),
                  help=_("Mode of the mentoring. 'standard' or 'assessment'"),
                  default='standard',
                  scope=Scope.content,
                  values=MENTORING_MODES)
    followed_by = String(
        display_name=_("Followed by"),
        help=
        _("url_name of the step after the current mentoring block in workflow."
          ),
        default=None,
        scope=Scope.content)
    enforce_dependency = Boolean(
        display_name=_("Enforce Dependency"),
        help=_("Should the next step be the current block to complete?"),
        default=False,
        scope=Scope.content,
        enforce_type=True)
    display_submit = Boolean(display_name=_("Show Submit Button"),
                             help=_("Allow submission of the current block?"),
                             default=True,
                             scope=Scope.content,
                             enforce_type=True)
    xml_content = String(
        display_name=_("XML content"),
        help=
        _("Not used for version 2. This field is here only to preserve the data needed to upgrade from v1 to v2."
          ),
        default='',
        scope=Scope.content,
        multiline_editor=True)

    # Settings
    weight = Float(display_name=_("Weight"),
                   help=_("Defines the maximum total grade of the block."),
                   default=1,
                   scope=Scope.settings,
                   enforce_type=True)
    display_name = String(display_name=_("Title (Display name)"),
                          help=_("Title to display"),
                          default=_("Problem Builder"),
                          scope=Scope.settings)
    feedback_label = String(display_name=_("Feedback Header"),
                            help=_("Header for feedback messages"),
                            default=_("Feedback"),
                            scope=Scope.content)

    # User state
    attempted = Boolean(
        # Has the student attempted this mentoring step?
        default=False,
        scope=Scope.user_state
        # TODO: Does anything use this 'attempted' field? May want to delete it.
    )
    completed = Boolean(
        # Has the student completed this mentoring step?
        default=False,
        scope=Scope.user_state)
    step = Integer(
        # Keep track of the student assessment progress.
        default=0,
        scope=Scope.user_state,
        enforce_type=True)
    student_results = List(
        # Store results of student choices.
        default=[],
        scope=Scope.user_state)
    extended_feedback = Boolean(
        help=_("Show extended feedback details when all attempts are used up."),
        default=False,
        # BUG FIX: this was previously passed as ``Scope=Scope.content``
        # (capital S) — an unrelated keyword argument — so the field's scope
        # was never actually set to Scope.content.
        scope=Scope.content)

    # Global user state
    next_step = String(
        # url_name of the next step the student must complete (global to all blocks)
        default='mentoring_first',
        scope=Scope.preferences)

    editable_fields = ('display_name', 'followed_by', 'max_attempts',
                       'enforce_dependency', 'display_submit',
                       'feedback_label', 'weight', 'extended_feedback')

    # This block reports a score to the runtime grading system.
    has_score = True

    @property
    def is_assessment(self):
        """Whether this mentoring XBlock is running in assessment mode."""
        return self.mode == 'assessment'

    def get_question_number(self, question_id):
        """
        Return the step number of the question with the given id.

        Raises:
            ValueError: when no question child matches ``question_id``.
        """
        for child_id in self.children:
            block = self.runtime.get_block(child_id)
            if isinstance(block, QuestionMixin) and block.name == question_id:
                return block.step_number
        raise ValueError(
            "Question ID in answer set not a step of this Mentoring Block!")

    def answer_mapper(self, answer_status):
        """
        Build a JSON-dumpable list of the student answers whose status
        matches ``answer_status``.

        Each entry carries the question's step number, its id, and the raw
        result details. Questions deleted since the student answered them
        are silently skipped.
        """
        mapped = []
        for name, details in self.student_results:
            if details['status'] != answer_status:
                continue
            try:
                number = self.get_question_number(name)
            except ValueError:
                continue  # The question has been deleted since the student answered it.
            mapped.append({
                'number': number,
                'id': name,
                'details': details,
            })
        return mapped

    @property
    def score(self):
        """Compute the student score, weighting each step by its own weight."""
        steps = self.steps
        questions_by_name = {step.name: step for step in steps}
        weight_total = sum(float(step.weight) for step in steps)
        if weight_total == 0:
            return Score(0, 0, [], [], [])
        earned = 0
        for name, details in self.student_results:
            question = questions_by_name.get(name)
            if question:
                earned += details['score'] * question.weight
        raw = earned / weight_total
        correct = self.answer_mapper(CORRECT)
        incorrect = self.answer_mapper(INCORRECT)
        partially_correct = self.answer_mapper(PARTIAL)
        return Score(raw, int(round(raw * 100)), correct, incorrect,
                     partially_correct)

    def student_view(self, context):
        """
        Render the mentoring block for the student: all non-message child
        blocks plus the JS/CSS assets and templates the front end needs.
        """
        # Migrate stored data if necessary
        self.migrate_fields()

        # Validate self.step:
        num_steps = len(self.steps)
        if self.step > num_steps:
            self.step = num_steps

        fragment = Fragment()
        child_content = u""

        for child_id in self.children:
            child = self.runtime.get_block(child_id)
            if child is None:  # child should not be None but it can happen due to bugs or permission issues
                child_content += u"<p>[{}]</p>".format(
                    self._(u"Error: Unable to load child component."))
            elif not isinstance(child, MentoringMessageBlock):
                try:
                    # Questions render with a mode-specific view when available.
                    if self.is_assessment and isinstance(child, QuestionMixin):
                        child_fragment = child.render('assessment_step_view',
                                                      context)
                    else:
                        child_fragment = child.render('mentoring_view',
                                                      context)
                except NoSuchViewError:
                    if child.scope_ids.block_type == 'html' and getattr(
                            self.runtime, 'is_author_mode', False):
                        # html block doesn't support mentoring_view, and if we use student_view Studio will wrap
                        # it in HTML that we don't want in the preview. So just render its HTML directly:
                        child_fragment = Fragment(child.data)
                    else:
                        child_fragment = child.render('student_view', context)
                fragment.add_frag_resources(child_fragment)
                child_content += child_fragment.content

        fragment.add_content(
            loader.render_template(
                'templates/html/mentoring.html', {
                    'self':
                    self,
                    'title':
                    self.display_name,
                    'show_title':
                    self.show_title,
                    'child_content':
                    child_content,
                    'missing_dependency_url':
                    self.has_missing_dependency and self.next_step_url,
                }))
        fragment.add_css_url(
            self.runtime.local_resource_url(self,
                                            'public/css/problem-builder.css'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(
                self, 'public/js/vendor/underscore-min.js'))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/util.js'))
        # Pick the mode-specific front-end entry point.
        js_file = 'public/js/mentoring_{}_view.js'.format(
            'assessment' if self.is_assessment else 'standard')
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, js_file))
        fragment.add_javascript_url(
            self.runtime.local_resource_url(self, 'public/js/mentoring.js'))
        fragment.add_resource(
            loader.load_unicode('templates/html/mentoring_attempts.html'),
            "text/html")
        if self.is_assessment:
            fragment.add_resource(
                loader.load_unicode(
                    'templates/html/mentoring_assessment_templates.html'),
                "text/html")

        self.include_theme_files(fragment)
        # Workbench doesn't have font awesome, so add it:
        if WorkbenchRuntime and isinstance(self.runtime, WorkbenchRuntime):
            fragment.add_css_url(
                '//maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css'
            )

        fragment.initialize_js('MentoringBlock')

        # Without a submit button, rendering the block is the only chance
        # to report progress to the runtime.
        if not self.display_submit:
            self.runtime.publish(self, 'progress', {})

        return fragment

    def migrate_fields(self):
        """
        Upgrade stored field data when a format change breaks backward
        compatibility with previous data formats.

        Partial answers replaced the boolean ``completed`` key with a
        textual ``status`` key in ``self.student_results``.
        """
        results = self.student_results
        if not (results and 'completed' in results[0][1]):
            return
        # Rename the key and switch to the new text-based value format.
        for _name, details in results:
            details['status'] = 'correct' if details['completed'] else 'incorrect'
            del details['completed']

    @property
    def additional_publish_event_data(self):
        """Extra fields merged into every event published by this block."""
        return {
            'user_id': self.scope_ids.user_id,
            'component_id': self.url_name,
        }

    @property
    def has_missing_dependency(self):
        """
        True when the student must complete another step before this one:
        dependencies are enforced, this block is not yet completed, and the
        next required step is a different block.
        """
        if not self.enforce_dependency:
            return False
        if self.completed:
            return False
        return self.next_step != self.url_name

    @property
    def next_step_url(self):
        """URL of the page for the next step the student must complete."""
        return '/jump_to_id/{}'.format(self.next_step)

    def get_message(self, completed):
        """
        Return the message shown after a submission in normal mode.

        The message depends on whether the student achieved a perfect
        score and, failing that, whether any attempts remain.
        """
        if completed:
            # Perfect score achieved.
            return self.get_message_content('completed')
        if self.max_attempts_reached:
            # Not perfect, and no attempts left.
            return self.get_message_content('max_attempts_reached')
        # Not perfect, but the student can try again.
        return self.get_message_content('incomplete')

    @property
    def assessment_message(self):
        """
        Message shown after a submission in assessment mode, or None once
        the student has used up all attempts.
        """
        if self.max_attempts_reached:
            return None
        return self.get_message_content('on-assessment-review',
                                        or_default=True)

    @property
    def review_tips(self):
        """
        Review tips, shown for wrong answers in assessment mode.

        Only produced in assessment mode and only once the student has
        reached the last step; an empty list is returned otherwise.
        """
        # Review tips are only used in assessment mode, and only on the last step.
        if not self.is_assessment or self.step != len(self.step_ids):
            return []
        tips = []
        results_by_name = dict(self.student_results)
        for step in self.steps:
            outcome = results_by_name.get(step.name)
            if not outcome or outcome.get('status') == 'correct':
                continue  # Correct or unanswered: nothing to review.
            tip_html = step.get_review_tip()
            if not tip_html:
                continue
            if hasattr(self.runtime, 'replace_jump_to_id_urls'):
                tip_html = self.runtime.replace_jump_to_id_urls(tip_html)
            tips.append(tip_html)
        return tips

    def show_extended_feedback(self):
        """Whether detailed feedback should be shown: the feature is
        enabled and the student has used up all attempts."""
        return self.extended_feedback and self.max_attempts_reached

    @XBlock.json_handler
    def get_results(self, queries, suffix=''):
        """
        Get detailed results in the case of extended feedback.

        Right now there are two ways to get results-- through the template upon loading up
        the mentoring block, or after submission of an AJAX request like in
        submit or get_results here.
        """
        if self.mode == 'standard':
            results, completed, show_message = self._get_standard_results()
            mentoring_completed = completed
        else:
            # Assessment mode only exposes detailed results as extended feedback.
            if not self.show_extended_feedback():
                return {
                    'results': [],
                    'error': 'Extended feedback results cannot be obtained.'
                }
            results, completed, show_message = self._get_assessment_results(
                queries)
            mentoring_completed = True

        response = {
            'results': results,
            'completed': completed,
            'step': self.step,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
        }
        if show_message:
            response['message'] = self.get_message(mentoring_completed)
        return response

    def _get_standard_results(self):
        """
        Gets previous submissions results as if submit was called with exactly the same values as last time.
        """
        results = []
        completed = True
        show_message = bool(self.student_results)

        # In standard mode, all children is visible simultaneously, so need collecting responses from all of them
        for child in self.steps:
            child_result = child.get_last_result()
            results.append([child.name, child_result])
            completed = completed and (child_result.get('status', None)
                                       == 'correct')

        return results, completed, show_message

    def _get_assessment_results(self, queries):
        """
        Gets detailed results in the case of extended feedback.

        It may be a good idea to eventually have this function get results
        in the general case instead of loading them in the template in the future,
        and only using it for extended feedback situations.

        Right now there are two ways to get results-- through the template upon loading up
        the mentoring block, or after submission of an AJAX request like in
        submit or get_results here.
        """
        results = []
        completed = True
        choices = dict(self.student_results)
        # Only one child should ever be of concern with this method.
        for child in self.steps:
            if child.name and child.name in queries:
                results = [child.name, child.get_results(choices[child.name])]
                # Children may have their own definition of 'completed' which can vary from the general case
                # of the whole mentoring block being completed. This is because in standard mode, all children
                # must be correct to complete the block. In assessment mode with extended feedback, completion
                # happens when you're out of attempts, no matter how you did.
                completed = choices[child.name]['status']
                break

        return results, completed, True

    @XBlock.json_handler
    def submit(self, submissions, suffix=''):
        """
        AJAX handler: grade the submitted answers for the child steps.

        Rejects the submission when the attempt limit has been reached or a
        dependency step has not been completed. In assessment mode the work
        is delegated to ``handle_assessment_submit``.
        """
        log.info(u'Received submissions: {}'.format(submissions))
        # server-side check that the user is allowed to submit:
        if self.max_attempts_reached:
            raise JsonHandlerError(
                403, "Maximum number of attempts already reached.")
        elif self.has_missing_dependency:
            raise JsonHandlerError(
                403,
                "You need to complete all previous steps before being able to complete the current one."
            )

        # This has now been attempted:
        self.attempted = True

        if self.is_assessment:
            return self.handle_assessment_submit(submissions, suffix)

        submit_results = []
        previously_completed = self.completed
        completed = True
        for child in self.steps:
            if child.name and child.name in submissions:
                submission = submissions[child.name]
                child_result = child.submit(submission)
                submit_results.append([child.name, child_result])
                child.save()
                completed = completed and (child_result['status'] == 'correct')

        # Unlock the next step in the dependency chain once this one is done.
        if completed and self.next_step == self.url_name:
            self.next_step = self.followed_by

        # Update the score and attempts, unless the user had already achieved a perfect score ("completed"):
        if not previously_completed:
            # Update the results
            while self.student_results:
                self.student_results.pop()
            for result in submit_results:
                self.student_results.append(result)

            # Save the user's latest score
            self.runtime.publish(self, 'grade', {
                'value': self.score.raw,
                'max_value': 1,
            })

            # Mark this as having used an attempt:
            if self.max_attempts > 0:
                self.num_attempts += 1

        # Save the completion status.
        # Once it has been completed once, keep completion even if user changes values
        self.completed = bool(completed) or previously_completed

        message = self.get_message(completed)
        raw_score = self.score.raw

        self.runtime.publish(
            self, 'xblock.problem_builder.submitted', {
                'num_attempts': self.num_attempts,
                'submitted_answer': submissions,
                'grade': raw_score,
            })

        return {
            'results': submit_results,
            'completed': self.completed,
            'message': message,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
        }

    def handle_assessment_submit(self, submissions, suffix):
        """
        Process an answer submission while this block runs in assessment mode.

        Arguments:
            submissions: dict mapping child (question) names to the submitted
                answer payload for that question.
            suffix: unused handler suffix (kept for handler signature parity).

        Returns a dict with the student's progress: completion status, attempt
        counters, the current step, score breakdown and (after the final step)
        the assessment message and review tips.

        Side effects: advances ``self.step``, appends to
        ``self.student_results``, and on the final step publishes a
        'grade' event, bumps ``self.num_attempts`` and marks the block
        completed. Always publishes an
        'xblock.problem_builder.assessment.submitted' event.
        """
        completed = False
        current_child = None
        children = [
            self.runtime.get_block(child_id) for child_id in self.children
        ]
        # Message blocks are never questions; drop them before step bookkeeping.
        children = [
            child for child in children
            if not isinstance(child, MentoringMessageBlock)
        ]
        # The following is faster than the self.step_ids property
        steps = [
            child for child in children if isinstance(child, QuestionMixin)
        ]
        assessment_message = None
        review_tips = []

        for child in children:
            if child.name and child.name in submissions:
                submission = submissions[child.name]

                # Assessment mode doesn't allow to modify answers
                # This will get the student back at the step he should be
                current_child = child
                step = steps.index(child)
                if self.step > step or self.max_attempts_reached:
                    step = self.step
                    completed = False
                    break

                self.step = step + 1

                # Tips are stripped so students can't see feedback mid-assessment.
                child_result = child.submit(submission)
                if 'tips' in child_result:
                    del child_result['tips']
                self.student_results.append([child.name, child_result])
                completed = child_result['status']

        event_data = {}

        # Snapshot the score once so the published grade and the returned
        # breakdown are consistent.
        score = self.score

        if current_child == steps[-1]:
            log.info(u'Last assessment step submitted: {}'.format(submissions))
            self.runtime.publish(self, 'grade', {
                'value': score.raw,
                'max_value': 1,
                'score_type': 'proficiency',
            })
            event_data['final_grade'] = score.raw
            assessment_message = self.assessment_message
            review_tips = self.review_tips

            self.num_attempts += 1
            self.completed = True

        # NOTE(review): if ``submissions`` names no known child,
        # ``current_child`` is still None here and ``.name`` would raise;
        # likewise ``steps[-1]`` above assumes at least one question child.
        # Presumably callers guarantee both — confirm upstream.
        event_data['exercise_id'] = current_child.name
        event_data['num_attempts'] = self.num_attempts
        event_data['submitted_answer'] = submissions

        self.runtime.publish(self,
                             'xblock.problem_builder.assessment.submitted',
                             event_data)

        return {
            'completed': completed,
            'max_attempts': self.max_attempts,
            'num_attempts': self.num_attempts,
            'step': self.step,
            'score': score.percentage,
            'correct_answer': len(score.correct),
            'incorrect_answer': len(score.incorrect),
            'partially_correct_answer': len(score.partially_correct),
            'correct': self.correct_json(stringify=False),
            'incorrect': self.incorrect_json(stringify=False),
            'partial': self.partial_json(stringify=False),
            'extended_feedback': self.show_extended_feedback() or '',
            'assessment_message': assessment_message,
            'assessment_review_tips': review_tips,
        }

    @XBlock.json_handler
    def try_again(self, data, suffix=''):
        """
        Reset the block so the student can start a fresh attempt.

        Refuses with an error payload when the attempt limit has been
        exhausted; otherwise rewinds to the first step, clears the
        completion flag and discards all recorded answers.
        """
        if self.max_attempts_reached:
            return {'result': 'error', 'message': 'max attempts reached'}

        # Rewind progress and forget completion state.
        self.step = 0
        self.completed = False

        # Empty the recorded results in place, one entry at a time.
        results = self.student_results
        while results:
            results.pop()

        return {'result': 'success'}

    def validate(self):
        """
        Validates the state of this XBlock except for individual field values.
        """
        validation = super(MentoringBlock, self).validate()
        child_with_issues = False
        seen_message_types = set()
        for child_id in self.children:
            block = self.runtime.get_block(child_id)
            # Propagate any child-level validation problems as a single flag.
            if not block.validate().empty:
                child_with_issues = True
            # Each "message" type may appear at most once among the children.
            if isinstance(block, MentoringMessageBlock):
                if block.type in seen_message_types:
                    validation.add(ValidationMessage(
                        ValidationMessage.ERROR,
                        self._(
                            u"There should only be one '{msg_type}' message component."
                        ).format(msg_type=block.type),
                    ))
                seen_message_types.add(block.type)
        if child_with_issues:
            validation.add(ValidationMessage(
                ValidationMessage.ERROR,
                self._(u"A component inside this mentoring block has issues."),
            ))
        return validation

    def author_edit_view(self, context):
        """
        Add some HTML to the author view that allows authors to add child blocks.
        """
        # The wrapping DIV is needed for CSS to apply to the previews.
        fragment = Fragment(u'<div class="mentoring">')
        self.render_children(context, fragment, can_reorder=True, can_add=False)
        fragment.add_content(u'</div>')
        fragment.add_content(loader.render_template(
            'templates/html/mentoring_add_buttons.html', {}))
        fragment.add_content(loader.render_template(
            'templates/html/mentoring_url_name.html',
            {"url_name": self.url_name}))
        # Editor stylesheets and scripts, attached in their original order.
        for css_path in (
            'public/css/problem-builder.css',
            'public/css/problem-builder-edit.css',
            'public/css/problem-builder-tinymce-content.css',
        ):
            fragment.add_css_url(self.runtime.local_resource_url(self, css_path))
        for js_path in (
            'public/js/util.js',
            'public/js/mentoring_edit.js',
        ):
            fragment.add_javascript_url(
                self.runtime.local_resource_url(self, js_path))
        fragment.initialize_js('MentoringEditComponents')
        return fragment

    @staticmethod
    def workbench_scenarios():
        """
        Scenarios displayed by the workbench. Load them from external (private) repository
        """
        scenarios_dir = 'templates/xml'
        return loader.load_scenarios_from_path(scenarios_dir)