Example No. 1
class NumericQuestion(Question):
    answer = models.FloatField(
        _('Answer'),
        help_text=_('The numeric value for the correct answer')
    )
    tolerance = models.FloatField(
        _('Tolerance'),
        help_text=_('If given, defines the tolerance within which responses '
                    'are still considered to be correct'),
        default=0,
        blank=True,
    )

    response_cls = NumericResponse

    @property
    def is_exact(self):
        return self.tolerance == 0

    @property
    def start(self):
        return self.answer - abs(self.tolerance)

    @property
    def end(self):
        return self.answer + abs(self.tolerance)

    @property
    def range(self):
        return self.start, self.end

    def grade(self, response):
        x, y = self.range
        response.grade = (100 if x <= response.value <= y else 0)
        response.save()
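
The grading rule reduces to a simple interval test. A standalone sketch of the same logic (plain Python, no Django required; names are illustrative):

def grade_value(answer, tolerance, value):
    """Score 100 when value falls within [answer - |tol|, answer + |tol|]."""
    lo, hi = answer - abs(tolerance), answer + abs(tolerance)
    return 100 if lo <= value <= hi else 0

assert grade_value(10.0, 0.5, 10.3) == 100
assert grade_value(10.0, 0.5, 10.6) == 0
assert grade_value(10.0, 0.0, 10.0) == 100  # tolerance 0 demands an exact match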
Example No. 2
class NumericQuestion(Question):
    """
    A very simple question with a simple numeric answer.
    """

    correct_answer = models.FloatField(
        _('Correct answer'),
        help_text=_('The expected numeric answer for the question.'))

    tolerance = models.FloatField(
        _('Tolerance'),
        default=0,
        help_text=_('If tolerance is zero, the responses must be exact.'),
    )
    label = models.CharField(
        _('Label'),
        max_length=100,
        default=_('Answer'),
        help_text=_(
            'The label text that is displayed in the submission form.'),
    )
    help_text = models.TextField(
        _('Help text'),
        blank=True,
        help_text=_(
            'Additional explanation that is displayed under the input form.'))

    class Meta:
        verbose_name = _('Numeric question')
        verbose_name_plural = _('Numeric questions')

    def get_form_class(self):
        class NumericForm(forms.Form):
            value = forms.FloatField(label=self.label, required=True)

        return NumericForm

    def get_form(self, *args, **kwargs):
        return self.get_form_class()(*args, **kwargs)

    # Serving Pages
    template = 'questions/numeric/detail.jinja2'

    def get_context(self, request, **kwargs):
        ctx = super().get_context(request, **kwargs)
        # Bind POST data only when present; an unbound form renders cleanly
        ctx['form'] = self.get_form(request.POST or None)
        return ctx

    def get_submission_kwargs(self, request, kwargs):
        return {'value': float(kwargs.get('value', None) or 0)}
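
Note that get_form_class() builds the form class per instance so the field label can come from the database, and get_submission_kwargs() coerces the raw POST value with a fallback to 0. A rough usage sketch (assuming a configured Django project; values are illustrative):

question = NumericQuestion(correct_answer=42.0, tolerance=0.5, label='Answer')
NumericForm = question.get_form_class()
form = NumericForm({'value': '41.8'})
assert form.is_valid()
assert question.get_submission_kwargs(None, {'value': '41.8'}) == {'value': 41.8}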
Example No. 3
class QuizItem(models.Orderable):
    """
    A question in a quiz.
    """

    quiz = models.ParentalKey(
        'cs_questions.Quiz',
        related_name='quiz_items',
    )
    question = models.ForeignKey(
        'wagtailcore.Page',
        related_name='+',
    )
    weight = models.FloatField(
        _('value'),
        default=1.0,
        help_text=_(
            'The non-normalized weight of this item in the total quiz grade.'),
    )

    # Wagtail admin
    panels = [
        panels.PageChooserPanel('question', [
            'cs_questions.CodingIoQuestion',
            'cs_questions.FormQuestion',
        ]),
        panels.FieldPanel('weight'),
    ]
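
The weight is explicitly non-normalized, so the quiz grade presumably divides by the total weight. A hypothetical sketch of that combination (the Quiz model itself is not shown in this example):

def quiz_grade(grades, weights):
    """Weighted average of item grades using non-normalized weights."""
    total = sum(weights)
    return sum(g * w for g, w in zip(grades, weights)) / total

assert quiz_grade([100, 0], [1.0, 1.0]) == 50.0
assert quiz_grade([100, 0], [3.0, 1.0]) == 75.0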
Example No. 4
class NumericSubmission(QuestionSubmission):
    """
    Submission class for numeric questions.
    """

    value = models.FloatField()

    def compute_hash(self):
        return str(hash(self.value))
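
The hash here relies on Python's numeric hashing, which is deterministic across processes (string hashing is salted, numeric hashing is not) and treats equal numbers as equal, so submissions with values 1 and 1.0 deduplicate to the same hash:

assert str(hash(1.0)) == str(hash(1))  # equal numbers, equal hashes
assert hash(3.14) == hash(3.14)        # stable within and across runs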
Example No. 5
class NumericResponse(QuestionResponse):
    """The response object to a simple numeric question."""

    value = models.FloatField(_('Value'),
                              help_text=_('Result (it must be a number)'))

    def autograde_compute(self):
        question = self.question
        if abs(self.value - question.answer) <= question.tolerance:
            return 100
        else:
            return 0
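
For a non-negative tolerance this absolute-difference test is equivalent to the interval check from Example No. 1:

a, t = 10.0, 0.5
for x in (9.4, 9.5, 10.5, 10.6):
    assert (abs(x - a) <= t) == (a - t <= x <= a + t)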
Example No. 6
class CodingIoQuestion(Question):
    """
    CodeIo questions evaluate source code and judge them by checking if the
    inputs and corresponding outputs match an expected pattern.
    """
    class Meta:
        verbose_name = _('Programming question (IO-based)')
        verbose_name_plural = _('Programming questions (IO-based)')

    EXT_TO_METHOD_CONVERSIONS = dict(
        Question.EXT_TO_METHOD_CONVERSIONS,
        md='markio',
    )

    iospec_size = models.PositiveIntegerField(
        _('number of iospec template expansions'),
        default=10,
        help_text=_(
            'The desired number of test cases that will be computed after '
            'comparing the iospec template with the answer key. This is only a '
            'suggested value and will only be applied if the response template '
            'uses input commands to generate random input.'),
    )
    iospec_source = models.TextField(
        _('response template'),
        help_text=_(
            'Template used to grade I/O responses. See '
            'http://pythonhosted.org/iospec for a complete reference on the '
            'template format.'),
    )
    iospec_hash = models.CharField(
        max_length=32,
        blank=True,
        help_text=_('A hash to keep track of iospec updates.'),
    )
    timeout = models.FloatField(
        _('timeout in seconds'),
        blank=True,
        default=1.0,
        help_text=_(
            'Defines the maximum runtime the grader will spend evaluating '
            'each test case.'),
    )
    language = models.ForeignKey(
        ProgrammingLanguage,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
        help_text=_(
            'Programming language associated with question. Leave it blank in '
            'order to accept submissions in any programming language. This '
            'option should be set only for questions that test specific '
            'programming language constructs or require techniques that only '
            'make sense in specific programming languages.'),
    )

    __iospec_updated = False
    __answers = ()

    @lazy
    def iospec(self):
        """
        The IoSpec structure corresponding to the iospec_source.
        """

        return parse_iospec(self.iospec_source)

    def __init__(self, *args, **kwargs):
        # Supports automatic conversion between iospec data and iospec_source
        iospec = kwargs.pop('iospec', None)
        if iospec:
            kwargs['iospec_source'] = iospec.source()
            self.iospec = iospec
        super().__init__(*args, **kwargs)

    def load_from_file_data(self, file_data):
        fake_post = super().load_from_file_data(file_data)
        fake_post['iospec_source'] = self.iospec_source
        return fake_post

    def clean(self):
        """
        Validate the iospec_source field.
        """

        super().clean()

        # We first should check if the iospec_source has been changed and would
        # require a possibly expensive validation.
        source = self.iospec_source
        iospec_hash = md5hash(source)
        if self.iospec_hash != iospec_hash:
            try:
                self.iospec = iospec = parse_iospec(self.iospec_source)
            except Exception as ex:
                raise ValidationError(
                    {'iospec_source': _('invalid iospec syntax: %s' % ex)})

            # Now we check if the new iospec requires an answer key code and
            # if it has some answer key defined
            self.__iospec_updated = True
            if (not iospec.is_expanded) and not self.answers.has_program():
                raise ValidationError({
                    'iospec_source':
                    _('Your iospec definition uses a command or an @input block '
                      'and thus requires an example grading code. Please define '
                      'an "Answer Key" item with source code for at least one '
                      'programming language.')
                })

    def load_from_markio(self, file_data):
        """
        Load question parameters from Markio file.
        """

        data = markio.parse(file_data)

        # Load simple data from markio
        self.title = data.title or self.title
        self.short_description = (data.short_description
                                  or self.short_description)
        self.timeout = data.timeout or self.timeout
        self.author_name = data.author or self.author_name
        self.iospec_source = data.tests or self.iospec_source

        # Load main description
        # noinspection PyUnresolvedReferences
        self.body = markdown_to_blocks(data.description)

        # Add answer keys
        answer_keys = OrderedDict()
        for (lang, answer_key) in data.answer_key.items():
            language = programming_language(lang)
            key = self.answers.create(question=self,
                                      language=language,
                                      source=answer_key)
            answer_keys[lang] = key
        for (lang, placeholder) in data.placeholder.items():
            if placeholder is None:
                continue
            try:
                answer_keys[lang].placeholder = placeholder
            except KeyError:
                language = ProgrammingLanguage.objects.get(ref=lang)
                self.answers.create(question=self,
                                    language=language,
                                    placeholder=placeholder)
        self.__answers = list(answer_keys.values())

    # Serialization methods: support markio and set it as the default
    # serialization method for CodingIoQuestion instances
    @classmethod
    def load_markio(cls, source):
        """
        Creates a CodingIoQuestion object from a Markio object or source
        string and saves the resulting question in the database.

        This function can run without touching the database if the markio file
        does not define any information that should be saved in an answer key.

        Args:
            source:
                A string with the Markio source code.

        Returns:
            question:
                A question object.
        """

        raise NotImplementedError

    def dump_markio(self):
        """
        Serializes question into a string of Markio source.
        """

        tree = markio.Markio(
            title=self.name,
            author=self.author_name,
            timeout=self.timeout,
            short_description=self.short_description,
            description=self.long_description,
            tests=self.iospec_source,
        )

        for key in self.answer_keys.all():
            tree.add_answer_key(key.source, key.language.ref)
            tree.add_placeholder(key.placeholder, key.language.ref)

        return tree.source()

    def full_clean(self, *args, **kwargs):
        if self.__answers:
            self.answers = self.__answers
        super().full_clean(*args, **kwargs)

    def placeholder(self, language=None):
        """
        Return the placeholder text for the given language.
        """

        key = self.answers[language or self.language]
        if key is None:
            return ''
        return key.placeholder

    def reference_source(self, language=None):
        """
        Return the reference source code for the given language or None, if no
        reference is found.
        """

        key = self.answers[language or self.language]
        if key is None:
            return ''
        return key.source

    def run_code(self, source, language=None, iospec=None):
        """
        Run the given source code string of the given programming language
        using the default or the given IoSpec.

        If no code string is given, runs the reference source code, if it
        exists.
        """

        key = self.answers[language or self.language]
        return key.run(source, iospec)

    def update_iospec_source(self):
        """
        Updates the iospec_source attribute with the current iospec object.

        Any modifications made to `self.iospec` must be saved explicitly to
        persist in the database.
        """

        if 'iospec' in self.__dict__:
            self.iospec_source = self.iospec.source()

    def submit(self, user, source=None, language=None, **kwargs):
        # Fetch info from response_data
        response_data = kwargs.get('response_data', {})
        if source is None and 'source' in response_data:
            source = response_data.pop('source')
        if language is None and 'language' in response_data:
            language = response_data.pop('language')

        # Assure language is valid
        language = language or self.language
        if not language:
            raise ValueError(
                'could not determine the programming language for '
                'the submission')

        # Assure response data is empty
        if response_data:
            key = next(iter(response_data))
            raise TypeError('invalid or duplicate parameter passed to '
                            'response_data: %r' % key)

        # Construct response data and pass it to super
        response_data = {
            'language': language.ref,
            'source': source,
        }

        return super().submit(user, response_data=response_data, **kwargs)

    # Serving pages and routing
    template = 'questions/coding_io/detail.jinja2'
    template_submissions = 'questions/coding_io/submissions.jinja2'

    def get_context(self, request, *args, **kwargs):
        context = dict(super().get_context(request, *args, **kwargs),
                       form=True)

        # Select default mode for the ace editor
        if self.language:
            context['default_mode'] = self.language.ace_mode()
        else:
            context['default_mode'] = get_config('CODESCHOOL_DEFAULT_ACE_MODE',
                                                 'python')

        # Enable language selection
        if self.language is None:
            context['select_language'] = True
            context['languages'] = ProgrammingLanguage.supported.all()
        else:
            context['select_language'] = False

        return context

    @srvice.route(r'^submit-response/$')
    def route_submit(self, client, source=None, language=None, **kwargs):
        """
        Handles student responses via AJAX and a srvice program.
        """

        # User must choose language
        if not language:
            if self.language is None:
                client.dialog('<p class="dialog-text">%s</p>' %
                              _('Please select the correct language'))
                return
            language = self.language
        else:
            language = programming_language(language)

        # Bug with <ace-editor>?
        if not source or source == '\x01\x01':
            client.dialog('<p class="dialog-text">%s</p>' %
                          _('Internal error: please send it again!'))
            return

        super().route_submit(
            client=client,
            language=language,
            source=source,
        )

    @srvice.route(r'^placeholder/$')
    def route_placeholder(self, request, language):
        """
        Return the placeholder code for some language.
        """

        return self.placeholder(language)

    # Wagtail admin
    content_panels = Question.content_panels[:]
    content_panels.insert(
        -1,
        panels.MultiFieldPanel([
            panels.FieldPanel('iospec_size'),
            panels.FieldPanel('iospec_source'),
        ], heading=_('IoSpec definitions')))
    content_panels.insert(
        -1, panels.InlinePanel('answers', label=_('Answer keys')))
    settings_panels = Question.settings_panels + [
        panels.MultiFieldPanel([
            panels.FieldPanel('language'),
            panels.FieldPanel('timeout'),
        ], heading=_('Options'))
    ]
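
The clean() method above guards a potentially expensive re-validation behind a content hash. A self-contained sketch of the same pattern (md5hash is assumed to be a hex-digest helper, as its usage suggests):

import hashlib

def md5hash(text):
    return hashlib.md5(text.encode('utf8')).hexdigest()

class HashGuarded:
    source = ''
    source_hash = md5hash('')

    def clean(self, validate):
        new_hash = md5hash(self.source)
        if new_hash != self.source_hash:
            validate(self.source)        # runs only when the source changed
            self.source_hash = new_hash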
Example No. 7
class CodingIoQuestion(Question, models.StatusModel):
    """
    CodeIo questions evaluate source code and judge them by checking if the
    inputs and corresponding outputs match an expected pattern.
    """

    STATUS_INVALID = 'invalid'
    STATUS_UGLY = 'ugly'
    STATUS_DIRTY = 'dirty'
    STATUS_VALID = 'valid'
    STATUS_INCOMPLETE = 'incomplete'
    STATUS = models.Choices(
        (STATUS_INCOMPLETE, _('is not yet fully initialized')),
        (STATUS_INVALID, _('no valid answers')),
        (STATUS_UGLY, _('inconsistent answers')),
        (STATUS_DIRTY, _('some valid answers')),
        (STATUS_VALID, _('valid')),
    )
    iospec_size = models.PositiveIntegerField(
        _('number of iospec template expansions'),
        default=0,
        blank=True,
        help_text=_('The desired number of test cases that will be computed '
                    'after comparing the iospec template with the answer key. '
                    'This is only a suggested value and will only be applied '
                    'if the response template uses input commands to generate '
                    'random input.'),
    )
    iospec_source = models.TextField(
        _('response template'),
        blank=True,
        help_text=_('Template used to grade I/O responses. See '
                    'http://pythonhosted.org/iospec for a complete reference '
                    'on the template format.'),
    )
    timeout = models.FloatField(
        _('timeout in seconds'),
        blank=True,
        default=5.0,
        help_text=_('Defines the maximum runtime the grader will spend '
                    'evaluating each test case.'),
    )
    tracker = FieldTracker()

    @property
    def iospec(self):
        """The IoSpec structure corresponding to the iospec_source."""

        return parse_iospec(self.iospec_source)

    @property
    def hash(self):
        """The hash for the iospec_source string. This hash is compared to a
        hash registered to each answer key to check if it has the most current
        iospec data."""

        return md5hash(self.iospec_source + str(self.iospec_size))

    @property
    def is_answer_key_complete(self):
        """Return True if an answer key exists for all programming languages."""

        refs = self.answer_keys.values_list('language__ref', flat=True)
        all_refs = ProgrammingLanguage.objects.values_list('ref', flat=True)
        return set(all_refs) == set(refs)

    class Meta:
        app_label = 'cs_questions'
        verbose_name = _('input/output question')
        verbose_name_plural = _('input/output questions')

    # Importing and exporting
    @classmethod
    def from_markio(cls, source, commit=None, return_keys=False):
        """Creates a CodingIoQuestion object from a Markio object r source
        string and saves the resulting question in the database.

        This function can run without touching the database if the markio file
        does not define any information that should be saved in an answer key.

        Args:
            source:
                A string with the Markio source code.
            commit (bool):
                If True (default), saves resulting question in the database.
            return_keys (bool):
                If True, also return a dictionary mapping language references
                to answer keys.

        Returns:
            question:
                A question object.
            [answer_keys]:
                A map from language references to :class:`AnswerKeyItem`
                objects.
        """

        import markio

        if isinstance(source, markio.Markio):
            data = source
        else:
            data = markio.parse_string(source)

        # Create question object from parsed markio data
        question = CodingIoQuestion(
            title=data.title,
            author_name=data.author,
            timeout=data.timeout,
            short_description=data.short_description,
            long_description=data.description,
            iospec_source=data.tests,
        )
        saving(question, commit)

        # Add answer keys
        answer_keys = {}
        for (lang, answer_key) in data.answer_key.items():
            language = programming_language(lang)
            key = saving(CodingIoAnswerKey(question=question,
                                           language=language,
                                           source=answer_key), commit)
            answer_keys[lang] = key
        for (lang, placeholder) in data.placeholder.items():
            if placeholder is None:
                continue
            try:
                answer_keys[lang].placeholder = placeholder
                saving(answer_keys[lang], commit, update_fields=['placeholder'])
            except KeyError:
                language = ProgrammingLanguage.objects.get(ref=lang)
                key = CodingIoAnswerKey(question=question,
                                        language=language,
                                        placeholder=placeholder)
                saving(key, commit)

        # Question is done!
        if return_keys:
            answer_keys = {key.language.ref: key
                           for key in answer_keys.values()}
            return question, answer_keys
        return question

    @classmethod
    def from_data(cls, source):
        """Return a new CodingIoQuestion instance from a string of Markio
        data. This API is used by the HasUploadMixin in the create view."""

        return cls.from_markio(source.decode('utf8'))

    def to_markio(self):
        """Serializes question into a string of Markio source."""

        import markio

        tree = markio.Markio(
            title=self.name,
            author=self.author_name,
            timeout=self.timeout,
            short_description=self.short_description,
            description=self.long_description,
            tests=self.iospec_source,
        )

        for key in self.answer_keys.all():
            tree.add_answer_key(key.source, key.language.ref)
            tree.add_placeholder(key.placeholder, key.language.ref)

        return tree.source()

    def to_data(self, type=None):
        """Render question as a Markio source. This API is used by the
        DetailView in the CRUD interface."""

        if type in (None, 'markio'):
            return self.to_markio()
        else:
            return NotImplemented

    # Validation
    def update(self, save=True, validate=True):
        """Update and validate all answer keys."""

        exception = None
        expanded_sources = {}
        invalid_languages = set()
        valid_languages = set()

        def validate_answer_keys():
            nonlocal exception

            for key in self.answer_keys.all():
                try:
                    if not key.is_update:
                        key.question = self
                        key.update(save, validate)
                    if not key.is_valid:
                        invalid_languages.add(key.language.ref)
                    elif key.source:
                        valid_languages.add(key.language.ref)
                except key.ValidationError as ex:
                    exception = ex
                if key.iospec_source:
                    expanded_sources[key.language.ref] = key.iospec_source

            if len(expanded_sources) == 0:
                self.status = 'invalid'
            elif len(set(expanded_sources.values())) != 1:
                self.status = 'ugly'
            elif invalid_languages:
                if valid_languages:
                    self.status = 'dirty'
                else:
                    self.status = 'invalid'
            else:
                self.status = 'valid'

        # Save fields if rollback is necessary
        iospec_source = self.iospec_source
        iospec_size = self.iospec_size
        has_changed = (self.tracker.has_changed('iospec_source') or
                       self.tracker.has_changed('iospec_size'))

        # If fields had changed, update and restore original values
        if has_changed:
            self.save(update_fields=['iospec_source', 'iospec_size'])
            try:
                validate_answer_keys()
            finally:
                if not save:
                    self.iospec_size = iospec_size
                    self.iospec_source = iospec_source
                    self.save(update_fields=['iospec_source', 'iospec_size'])
        else:
            validate_answer_keys()

        # Force save if necessary
        if save:
            self.save()

    def update_keys(self):
        """Update all keys that were not updated."""

        for key in self.answer_keys.exclude(iospec_hash=self.hash):
            key.update(validate=False)

    def get_validation_errors(self, lang=None, test_iospec=True):
        """Raise ValueError if some answer key is invalid or produce
         invalid iospec expansions.

         Return a valid iospec tree expansion or None if no expansion was
         possible (e.g., by the lack of source code in the answer key)."""

        # It cannot be valid if the iospec source does not parse
        if test_iospec:
            try:
                tree = parse_iospec(self.iospec_source)
            except SyntaxError as ex:
                raise ValueError('invalid iospec syntax: %s' % ex)

        # Expand to all langs if lang is not given
        if lang is None:
            keys = self.answer_keys.exclude(source='')
            langs = keys.values_list('language', flat=True)
            expansions = [self.get_validation_errors(lang, test_iospec=False)
                          for lang in langs]
            if not expansions:
                return None
            if iospec.ioequal(expansions):
                return expansions[0]

        # Test a specific language
        if isinstance(lang, str):
            lang = ProgrammingLanguage.objects.get(ref=lang)
        try:
            key = self.answer_keys.get(language=lang)
        except CodingIoAnswerKey.DoesNotExist:
            return None

        if key.source:
            result = run_code(key.source, key, lang=lang.ref)
            if result.has_errors():
                raise result.get_exception()
            return result
        else:
            return None

    # Other API
    def get_placeholder(self, lang):
        """Return the placeholder text for the given language."""

        if isinstance(lang, str):
            try:
                lang = ProgrammingLanguage.objects.get(ref=lang)
            except ProgrammingLanguage.DoesNotExist:
                return ''
        try:
            key = self.answer_keys.get(language=lang)
            return key.placeholder
        except CodingIoAnswerKey.DoesNotExist:
            return ''

    def grade(self, response, error=None):
        """Grade the given response object and return the corresponding
        feedback object."""

        try:
            key = self.answer_keys.get(language=response.language)
            key.assure_is_valid(error)
            iospec_data = key.iospec
        except CodingIoAnswerKey.DoesNotExist:
            self.update_keys()

            # Get all sources
            iospec_sources = self.answer_keys.filter(is_valid=True)\
                .values_list('iospec_source', flat=True)
            iospec_sources = set(iospec_sources)

            # Check if there is only a single distinct source
            if not iospec_sources:
                iospec_data = self.iospec.copy()
                iospec_data.expand_inputs()
                if not all(isinstance(x, SimpleTestCase) for x in iospec_data):
                    raise (
                        error or
                        CodingIoAnswerKey.ValidationError(iospec_data.pformat())
                    )
            elif len(iospec_sources) == 1:
                iospec_data = parse_iospec(next(iter(iospec_sources)))
            else:
                raise error or CodingIoAnswerKey.ValidationError(iospec_sources)

        # Construct ejudge feedback object
        lang = response.language.ref
        source = response.source
        return grade_code(source, iospec_data, lang=lang)
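
The status resolution inside update() is a small decision table over the answer-key results. The same logic as a pure function (a sketch mirroring validate_answer_keys()):

def question_status(expanded_sources, invalid_langs, valid_langs):
    if not expanded_sources:
        return 'invalid'   # nothing could be expanded
    if len(set(expanded_sources.values())) != 1:
        return 'ugly'      # answer keys disagree on the expansion
    if invalid_langs:
        return 'dirty' if valid_langs else 'invalid'
    return 'valid'

assert question_status({}, set(), set()) == 'invalid'
assert question_status({'python': 'a', 'c': 'b'}, set(), {'python'}) == 'ugly'
assert question_status({'python': 'a'}, {'c'}, {'python'}) == 'dirty'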
Example No. 8
class Progress(CommitMixin,
               models.CopyMixin,
               models.StatusModel,
               models.TimeStampedModel,
               models.PolymorphicModel):
    """
    When an user starts an activity it opens a Progress object which control
    all submissions to the given activity.

    The Progress object also manages individual submissions that may span
    several http requests.
    """

    class Meta:
        unique_together = [('user', 'activity_page')]
        verbose_name = _('student progress')
        verbose_name_plural = _('student progress list')

    STATUS_OPENED = 'opened'
    STATUS_CLOSED = 'closed'
    STATUS_INCOMPLETE = 'incomplete'
    STATUS_WAITING = 'waiting'
    STATUS_INVALID = 'invalid'
    STATUS_DONE = 'done'

    STATUS = models.Choices(
        (STATUS_OPENED, _('opened')),
        (STATUS_CLOSED, _('closed')),
    )

    user = models.ForeignKey(models.User, on_delete=models.CASCADE)
    activity_page = models.ForeignKey(models.Page, on_delete=models.CASCADE)
    final_grade_pc = models.DecimalField(
        _('final score'),
        max_digits=6, decimal_places=3, default=Decimal,
        help_text=_(
            'Final grade, considering all submissions, penalties, etc.'
        ),
    )
    given_grade_pc = models.DecimalField(
        _('grade'),
        max_digits=6, decimal_places=3, default=Decimal,
        help_text=_('Final grade before applying any modifier.'),
    )
    finished = models.DateTimeField(blank=True, null=True)
    best_submission = models.ForeignKey('Submission', blank=True, null=True,
                                        related_name='+')
    points = models.IntegerField(default=0)
    score = models.IntegerField(default=0)
    stars = models.FloatField(default=0.0)
    is_correct = models.BooleanField(default=bool)
    has_submissions = models.BooleanField(default=bool)
    has_feedback = models.BooleanField(default=bool)
    has_post_tests = models.BooleanField(default=bool)
    objects = ProgressManager()

    #: The number of submissions
    num_submissions = property(lambda x: x.submissions.count())

    #: Specific activity reference
    activity = property(lambda x: x.activity_page.specific)
    activity_id = property(lambda x: x.activity_page_id)

    #: Has progress mixin interface
    username = property(lambda x: x.user.username)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self)

    def __str__(self):
        tries = self.num_submissions
        user = self.user
        activity = self.activity
        grade = '%s pts' % (self.final_grade_pc or 0)
        fmt = '%s by %s (%s, %s tries)'
        return fmt % (activity, user, grade, tries)

    def submit(self, request, payload, recycle=True, commit=True):
        """
        Creates new submission.

        Args:
            recycle:
                If True, recycle submission objects with the same content as the
                current submission. If a submission exists with the same content
                as the current submission, it simply returns the previous
                submission. If recycled, sets the submission.recycled to True.
        """

        submission_class = self.activity.submission_class
        submission = submission_class(progress=self, **payload)
        submission.ip_address = get_ip(request)
        submission.hash = submission.compute_hash()
        submission.full_clean()

        # Then check if any submission is equal to some past submission and
        # then recycle it
        recyclable = submission_class.objects.recyclable(submission)
        recyclable = recyclable if recycle else ()
        for possibly_equal in recyclable:
            if submission.is_equal(possibly_equal):
                possibly_equal.recycled = True
                possibly_equal.bump_recycles()
                return possibly_equal
        return submission.commit(commit)

    def register_feedback(self, feedback, commit=True):
        """
        This method is called after a submission is graded and produces a
        feedback.
        """

        submission = feedback.submission

        # Check if it is the best submission
        grade = feedback.given_grade_pc
        if (self.best_submission is None or
                self.best_submission.feedback.given_grade_pc < grade):
            self.best_submission = submission

        # Update grades for activity considering past submissions
        self.update_grades_from_feedback(feedback)
        self.commit(commit)

    def update_grades_from_feedback(self, feedback):
        """
        Update grades from the current progress object from the given feedback.
        """

        # Update grades, keeping always the best grade
        if self.given_grade_pc < (feedback.given_grade_pc or 0):
            self.given_grade_pc = feedback.given_grade_pc
        if self.final_grade_pc < feedback.final_grade_pc:
            self.final_grade_pc = feedback.final_grade_pc

        # Update the is_correct field
        self.is_correct = self.is_correct or feedback.is_correct
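
The grade bookkeeping in update_grades_from_feedback() is monotone: grades only ever move up. Distilled to plain numbers (the real fields are Decimals):

def merge_grades(given, final, fb_given, fb_final):
    """Keep the best grades seen so far, as update_grades_from_feedback() does."""
    return max(given, fb_given or 0), max(final, fb_final)

assert merge_grades(60, 55, 80, 70) == (80, 70)
assert merge_grades(60, 55, 40, 30) == (60, 55)  # worse feedback changes nothing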
Example No. 9
class GlobalAchievement(models.Model):
    user = models.ForeignKey(models.User)
    score = models.FloatField()
    token = models.CharField(max_length=100)
Example No. 10
class CodingIoQuestion(Question):
    """
    CodeIo questions evaluate source code and judge them by checking if the
    inputs and corresponding outputs match an expected pattern.
    """
    class Meta:
        verbose_name = _('input/output question')
        verbose_name_plural = _('input/output questions')

    iospec_size = models.PositiveIntegerField(
        _('number of iospec template expansions'),
        default=10,
        help_text=_(
            'The desired number of test cases that will be computed after '
            'comparing the iospec template with the answer key. This is only a '
            'suggested value and will only be applied if the response template '
            'uses input commands to generate random input.'),
    )
    iospec_source = models.TextField(
        _('response template'),
        help_text=_(
            'Template used to grade I/O responses. See '
            'http://pythonhosted.org/iospec for a complete reference on the '
            'template format.'),
    )
    iospec_hash = models.CharField(
        max_length=32,
        blank=True,
        help_text=_('A hash to keep track of iospec updates.'),
    )
    timeout = models.FloatField(
        _('timeout in seconds'),
        blank=True,
        default=1.0,
        help_text=_(
            'Defines the maximum runtime the grader will spend evaluating '
            'each test case.'),
    )
    is_usable = models.BooleanField(
        _('is usable'),
        help_text=_(
            'Tells if the question has at least one usable iospec entry. A '
            'complete iospec may be given from a single iospec source or by a '
            'combination of a valid source and a reference computer program.'))
    is_consistent = models.BooleanField(
        _('is consistent'),
        help_text=_(
            'Checks if all given answer keys are consistent with each other. '
            'The question might become inconsistent by the addition of an '
            'reference program that yields different results from the '
            'equivalent program in a different language.'))

    @lazy
    def iospec(self):
        """
        The IoSpec structure corresponding to the iospec_source.
        """

        return parse_iospec(self.iospec_source)

    @property
    def is_answer_key_complete(self):
        """
        Return True if an answer key item exists for all programming languages.
        """

        refs = self.answer_key_items.values_list('language__ref', flat=True)
        all_refs = ProgrammingLanguage.objects.values_list('ref', flat=True)
        return set(all_refs) == set(refs)

    @bound_property
    def language(self):
        """
        Instances can be bound to a programming language.
        """

        return getattr(self, '_language_bind', None)

    @language.setter
    def language(self, value):
        self._language_bind = programming_language(value, raises=False)

    @property
    def is_language_bound(self):
        return self.language is not None

    @property
    def default_language(self):
        """
        The main language associated with this question if a single answer key
        is defined.
        """

        return self.answer_key_items.get().language

    def _language(self, language=None, raises=True):
        # Shortcut used internally to normalize the given language
        if language is None:
            return self.language or self.default_language
        return programming_language(language, raises)

    def __init__(self, *args, **kwargs):
        # Supports automatic conversion between iospec data and iospec_source
        iospec = kwargs.pop('iospec', None)
        if iospec:
            kwargs['iospec_source'] = iospec.source()
            self.iospec = iospec
        super().__init__(*args, **kwargs)

    def clean(self):
        """
        Validate the iospec_source field.
        """

        super().clean()

        # We first should check if the iospec_source has been changed and thus
        # requires a possibly expensive validation.
        source = self.iospec_source
        iospec_hash = md5hash(source)
        if self.iospec_hash != iospec_hash:
            try:
                self.iospec = parse_iospec(self.iospec_source)
            except Exception:
                raise ValidationError(
                    {'iospec_source': _('invalid iospec syntax')})
            else:
                self.iospec_hash = iospec_hash
                if self.pk is None:
                    self.is_usable = self.iospec.is_simple
                    self.is_consistent = True
                else:
                    self.is_usable = self._is_usable(self.iospec)
                    self.is_consistent = self._is_consistent(self.iospec)

    def _is_usable(self, iospec):
        """
        This function is triggered during the clean() validation when new
        iospec data is inserted into the database.
        """

        # Simple iospecs are always valid since they can be compared with
        # arbitrary programs.
        if iospec.is_simple_io:
            return True

        # For now we reject all complex iospec structures
        return False

    def _is_consistent(self, iospec):
        """
        This function is triggered during the clean() validation when new
        iospec data is inserted into the database.
        """

        # Simple iospecs always produce consistent answer keys since we prevent
        # invalid reference programs from being inserted into the database
        # during AnswerKeyItem validation.
        if iospec.is_simple_io:
            return True

        # For now we reject all complex iospec structures
        return False

    # Serialization methods: support markio and set it as the default
    # serialization method for CodingIoQuestion instances
    @classmethod
    def load_markio(cls, source):
        """
        Creates a CodingIoQuestion object from a Markio object or source
        string and saves the resulting question in the database.

        This function can run without touching the database if the markio file
        does not define any information that should be saved in an answer key.

        Args:
            source:
                A string with the Markio source code.

        Returns:
            question:
                A question object.
        """

        import markio

        if isinstance(source, markio.Markio):
            data = source
        else:
            data = markio.parse_string(source)

        # Create question object from parsed markio data
        question = CodingIoQuestion.objects.create(
            title=data.title,
            author_name=data.author,
            timeout=data.timeout,
            short_description=data.short_description,
            long_description=data.description,
            iospec_source=data.tests,
        )

        # Add answer keys
        answer_keys = {}
        for (lang, answer_key) in data.answer_key.items():
            language = programming_language(lang)
            key = question.answer_keys.create(language=language,
                                              source=answer_key)
            answer_keys[lang] = key
        for (lang, placeholder) in data.placeholder.items():
            if placeholder is None:
                continue
            try:
                answer_keys[lang].placeholder = placeholder
                answer_keys[lang].save(update_fields=['placeholder'])
            except KeyError:
                language = ProgrammingLanguage.objects.get(ref=lang)
                question.answer_keys.create(language=language,
                                            placeholder=placeholder)
        return question

    @classmethod
    def load(cls, format='markio', **kwargs):
        return super().load(format=format, **kwargs)

    def dump_markio(self):
        """
        Serializes question into a string of Markio source.
        """

        import markio

        tree = markio.Markio(
            title=self.name,
            author=self.author_name,
            timeout=self.timeout,
            short_description=self.short_description,
            description=self.long_description,
            tests=self.iospec_source,
        )

        for key in self.answer_keys.all():
            tree.add_answer_key(key.source, key.language.ref)
            tree.add_placeholder(key.placeholder, key.language.ref)

        return tree.source()

    def answer_key_item(self, language=None):
        """
        Return the AnswerKeyItem instance for the requested language or None if
        no object is found.
        """

        language = self._language(language)
        try:
            return self.answer_key_items.get(language=language)
        except AnswerKeyItem.DoesNotExist:
            return None

    def answer_key(self, language=None):
        """
        Return the answer key IoSpec object associated with the given language.
        """

        key = self.answer_key_item(language)
        if key is None or key.iospec_source is None:
            new_key = self.answer_key_item()
            if key == new_key:
                if self.iospec.is_simple:
                    raise ValueError('no valid iospec is defined for the '
                                     'question')
                expanded = self.iospec.copy()
                expanded.expand_inputs(self.iospec_size)
                return expanded
            key = new_key

        # We check if the answer key item is synchronized with the parent hash
        if key.iospec_hash != key.parent_hash():
            try:
                key.update(self.iospec)
            except ValidationError:
                return self.iospec
        return key.iospec

    def placeholder(self, language=None):
        """
        Return the placeholder text for the given language.
        """

        key = self.answer_key_item(language)
        if key is None:
            return ''
        return key.placeholder

    def reference_source(self, language=None):
        """
        Return the reference source code for the given language or None, if no
        reference is found.
        """

        key = self.answer_key_item(language)
        if key is None:
            return ''
        return key.source

    def run_code(self, source=None, iospec=None, language=None):
        """
        Run the given source code string for the programming language using the
        default IoSpec.

        If no code string is given, runs the reference source code, if it
        exists.
        """

        if language is None:
            language = self.answer_key_items.get().language
        key = self.answer_key_item(language)
        return key.run(source, iospec)

    def update_iospec_source(self):
        """
        Updates the iospec_source attribute with the current iospec object.

        Any modifications made to `self.iospec` must be saved explicitly to
        persist on the database.
        """

        if 'iospec' in self.__dict__:
            self.iospec_source = self.iospec.source()

    def register_response_item(self, source, language=None, **kwargs):
        response_data = {
            'language': self._language(language).ref,
            'source': source,
        }
        kwargs.update(response_data=response_data)
        return super().register_response_item(**kwargs)

    # Serving pages and routing
    @srvice.route(r'^submit-response/$')
    def respond_route(self, client, source=None, language=None, **kwargs):
        """
        Handles student responses via AJAX and a srvice program.
        """

        if not language:
            client.dialog('<p>Please select the correct language</p>')
            return

        # Bug with <ace-editor>?
        if not source or source == '\x01\x01':
            client.dialog('<p>Internal error: please send it again!</p>')
            return

        language = programming_language(language)
        self.bind(client.request, language=language, **kwargs)
        response = self.register_response_item(source, autograde=True)
        html = render_html(response.feedback)
        client.dialog(html)

    @srvice.route(r'^placeholder/$')
    def get_placeholder_route(self, request, language):
        """
        Return the placeholder code for some language.
        """

        return self.placeholder(language)

    def get_context(self, request, *args, **kwargs):
        context = super().get_context(request, *args, **kwargs)
        context['form'] = ResponseForm(request.POST or None)
        return context

    # Wagtail admin
    content_panels = Question.content_panels[:]
    content_panels.insert(
        -1,
        panels.MultiFieldPanel([
            panels.FieldPanel('iospec_size'),
            panels.FieldPanel('iospec_source'),
        ], heading=_('IoSpec definitions')))
    content_panels.insert(
        -1, panels.InlinePanel('answer_key_items', label=_('Answer keys')))
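
_language() resolves the language in three steps: an explicit argument wins, then the language bound to the instance, then the question's single answer key. As a sketch:

def pick_language(explicit=None, bound=None, default=None):
    """Mirrors the resolution order of CodingIoQuestion._language()."""
    return explicit or bound or default

assert pick_language('c', 'python', 'python') == 'c'
assert pick_language(None, 'python', 'ruby') == 'python'
assert pick_language(None, None, 'ruby') == 'ruby'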
Example No. 11
class CodingIoQuestion(Question):
    """
    CodeIo questions evaluate source code and judge them by checking if the
    inputs and corresponding outputs match an expected pattern.
    """
    class Meta:
        verbose_name = _('Programming question (IO-based)')
        verbose_name_plural = _('Programming questions (IO-based)')

    num_pre_tests = models.PositiveIntegerField(
        _('# of pre-test examples'),
        default=3,
        validators=[validators.positive_integer_validator],
        help_text=_(
            'The desired number of test cases that will be computed after '
            'comparing the iospec template with the correct answer. This is '
            'only a suggested value and will only be applied if the response '
            'template uses input commands to generate random input.'),
    )
    pre_tests_source = models.TextField(
        _('response template'),
        blank=True,
        validators=[validators.iospec_source_validator],
        help_text=_(
            'Template used to grade I/O responses. See '
            'http://pythonhosted.org/iospec for a complete reference on the '
            'template format.'),
    )
    num_post_tests = models.PositiveIntegerField(
        _('# of post-test examples'),
        validators=[validators.positive_integer_validator],
        default=20)
    post_tests_source = models.TextField(
        _('response template (post evaluation)'),
        validators=[validators.iospec_source_validator],
        blank=True,
        help_text=_(
            'These tests are used only in a second round of corrections and '
            'are not immediately shown to users.'),
    )
    test_state_hash = models.CharField(
        max_length=32,
        blank=True,
        help_text=_('A hash to keep track of iospec sources updates.'),
    )
    timeout = models.FloatField(
        _('timeout in seconds'),
        validators=[validators.timeout_validator],
        blank=True,
        default=1.0,
        help_text=_(
            'Defines the maximum runtime the grader will spend evaluating '
            'each test case.'),
    )
    default_placeholder = models.TextField(
        _('placeholder'),
        blank=True,
        help_text=_('Default placeholder message that is used if it is not '
                    'defined for the given language. This will appear as a '
                    'comment block at the beginning of the submission.'))
    language = models.ForeignKey(
        ProgrammingLanguage,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
        help_text=_(
            'Programming language associated with question. Leave it blank in '
            'order to accept submissions in any programming language. This '
            'option should be set only for questions that test specific '
            'programming language constructs or require techniques that only '
            'make sense for specific programming languages.'),
    )

    # Fields for storing the results of an async post-validation.
    error_field = models.CharField(max_length=20, blank=True)
    error_message = models.TextField(blank=True)
    ignore_programming_errors = models.BooleanField(
        default=False,
        help_text=_(
            'Mark this if you want to ignore programming errors this time. '
            'It will ignore errors once, but you still have to fix the source '
            'of those errors to make the question operational.'))

    __answers = ()
    _iospec_expansion_is_dirty = False

    @property
    def pre_tests(self):
        try:
            return self._pre_tests
        except AttributeError:
            self._pre_tests = parse_iospec(self.pre_tests_source)
            return self._pre_tests

    @pre_tests.setter
    def pre_tests(self, value):
        self._pre_tests = value
        self.pre_tests_source = value.source()

    @pre_tests.deleter
    def pre_tests(self):
        try:
            del self._pre_tests
        except AttributeError:
            pass

    @property
    def post_tests(self):
        try:
            return self._post_tests
        except AttributeError:
            if self.post_tests_source:
                post_tests = parse_iospec(self.post_tests_source)
            else:
                post_tests = IoSpec()
            self._post_tests = ejudge.combine_iospec(self.pre_tests,
                                                     post_tests)
            return self._post_tests

    @post_tests.setter
    def post_tests(self, value):
        pre_tests = self.pre_tests
        value = IoSpec([test for test in value if test not in pre_tests])
        self._post_tests = ejudge.combine_iospec(self.pre_tests, value)
        self.post_tests_source = value.source()

    @post_tests.deleter
    def post_tests(self):
        try:
            del self._post_tests
        except AttributeError:
            pass
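
    # Invariant kept by this property pair: post_tests always contains
    # pre_tests. The setter strips tests already present in pre_tests before
    # storing post_tests_source, and the getter recombines both sets through
    # ejudge.combine_iospec().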

    submission_class = CodingIoSubmission

    def load_post_file_data(self, file_data):
        fake_post = super().load_post_file_data(file_data)
        fake_post['pre_tests_source'] = self.pre_tests_source
        fake_post['post_tests_source'] = self.post_tests_source
        return fake_post

    # Expanding and controlling the tests state
    def has_test_state_changed(self):
        """
        Return True if test state has changed.
        """

        return self.test_state_hash != compute_test_state_hash(self)

    def get_current_test_state(self, update=False):
        """
        Return a current TestState object synchronized with the current
        pre and post tests.

        It raises a ValidationError if an error is encountered during the
        recreation of the test state.
        """

        if update:
            hash = compute_test_state_hash(self)
        else:
            hash = self.test_state_hash

        try:
            return TestState.objects.get(question=self, hash=hash)
        except TestState.DoesNotExist:
            pre_tests = self.pre_tests
            post_tests = self.post_tests

            def expand(x):
                result = expand_tests(self, x)
                check_expansions_with_all_programs(self, result)
                return result

            pre_source = expand(pre_tests).source()
            post_source = expand(post_tests).source()

            return TestState.objects.create(
                question=self,
                hash=hash,
                pre_tests_source=self.pre_tests_source,
                post_tests_source=self.post_tests_source,
                pre_tests_source_expansion=pre_source,
                post_tests_source_expansion=post_source,
            )

    def get_expanded_pre_tests(self):
        """
        Return an IoSpec object with the result of pre tests expansions.
        """

        state = self.get_current_test_state()
        source = state.pre_tests_source_expansion
        return parse_iospec(source)

    def get_expanded_post_tests(self):
        """
        Return an IoSpec object with the result of post tests expansions.
        """

        state = self.get_current_test_state()
        source = state.post_tests_source_expansion
        return parse_iospec(source)

    def __expand_tests_to_source(self, tests):
        """
        Return the source of a iospec object full expansion.

        Similar to .expand_tests(), but return a string with the source code
        expansion.
        """

        if tests is None:
            return ''
        return self._expand_tests(tests)

    # Code runners
    def check_with_code(self, source, tests, language=None, timeout=None):
        """
        Wrapped version of check_with_code() that uses the question's own
        and language as default.
        """

        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        ejudge.check_with_code(source, tests, language, timeout)

    def run_code(self, source, tests, language=None, timeout=None):
        """
        Wrapped version of run_code() that uses the question's own timeout
        and language as default.
        """

        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        return ejudge.run_code(source, tests, language, timeout)

    def grade_code(self, source, inputs, language=None, timeout=None):
        """
        Wrapped version of grade_code() that uses the question's own timeout
        and language as default.
        """

        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        return ejudge.grade_code(source, inputs, language, timeout)

    def expand_from_code(self, source, inputs, language=None, timeout=None):
        """
        Wrapped version of expand_from_code() that uses the question's own
        and language as default.
        """

        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        return ejudge.expand_from_code(source, inputs, language, timeout)

    # Saving & validation
    def save(self, *args, **kwargs):
        self.test_state_hash = compute_test_state_hash(self)

        if not self.author_name and self.owner:
            name = self.owner.get_full_name() or self.owner.username
            email = self.owner.email
            self.author_name = '%s <%s>' % (name, email)

        super().save(*args, **kwargs)

    def clean(self):
        super().clean()

        if self.has_test_state_changed() or self.has_code_changed():
            logger.debug('%r: recomputing tests', self.title)
            self.schedule_validation()

    def full_clean(self, *args, **kwargs):
        if self.__answers:
            self.answers = self.__answers
        super().full_clean(*args, **kwargs)

    def full_clean_expansions(self):
        self.get_current_test_state(update=True)

    def full_clean_answer_keys(self):
        """
        Performs a full_clean() validation step on all answer key objects.
        """

        for key in self.answers.all():
            try:
                key.question = self
                key.full_clean()
            except ValidationError as ex:
                raise validators.invalid_related_answer_key_error(key, ex)

    def full_clean_all(self, *args, **kwargs):
        self.full_clean(*args, **kwargs)
        self.full_clean_answer_keys()
        self.full_clean_expansions()

    def schedule_validation(self):
        """
        Schedule full validation to be performed in the background.

        For now it simply calls mark_invalid_code_fields() in the foreground.
        """

        logger.debug('scheduling full code validation... (currently executed '
                     'in the foreground).')
        self.mark_invalid_code_fields()

    def mark_invalid_code_fields(self):
        """
        Performs a full validation with .full_clean(force_expansions=True)
        and marks all errors found in the question.
        """

        # FIXME: this validation step is currently disabled; remove the early
        # return below once background validation is in place.
        return
        try:
            self.full_clean(force_expansions=True)
        except ValidationError as ex:
            logger.error('code validation failed: %s' % ex)
            raise

    def validate_tests(self):
        """
        Triggered when (pre|post)_tests_source changes or the first time the
        .clean() method is called.
        """

        # Check if new source is valid
        for attr in ['pre_tests_source', 'post_tests_source']:
            try:
                source = getattr(self, attr)
                if source:
                    iospec = parse_iospec(source)
                else:
                    iospec = None
                setattr(self, attr[:-7], iospec)
            except Exception as ex:
                self.clear_tests()
                raise ValidationError(
                    {attr: _('invalid iospec syntax: %s') % ex})

        # Computes temporary expansions for all sources. A second step may be
        # required in which we use the reference source in answer key to further
        # expand iospec data structures
        iospec = self.pre_tests.copy()
        iospec.expand_inputs(self.number_of_pre_expansions)
        self.pre_tests_expanded = iospec

        if self.pre_tests_source and self.post_tests_source:
            iospec = ejudge.combine_iospecs(self.pre_tests, self.post_tests)
        elif self.post_tests_source:
            iospec = self.post_tests.copy()
        elif self.pre_tests_source:
            iospec = self.pre_tests.copy()
        else:
            raise ValidationError(
                _('either pre_tests_source or post_tests_source must be given!'
                  ))
        iospec.expand_inputs(self.number_of_post_expansions)
        # assert len(iospec) >= self.number_of_expansions, iospec
        self.post_tests_expanded = iospec

        if self.pre_tests_expanded.is_expanded and \
                self.post_tests_expanded.is_expanded:
            self.pre_tests_expanded_source = self.pre_tests_expanded.source()
            self.post_tests_expanded_source = self.post_tests_expanded.source()

        else:
            self._expand_from_answer_keys()

        # Iospec is valid: save the hash
        self.test_state_hash = compute_test_state_hash(self)

    def _expand_from_answer_keys(self):
        # If the source requires expansion, we have to check all answer keys
        # to see if one of them defines a valid source and compute the expansion
        # from this source. All languages must produce the same expansion,
        # otherwise it is considered to be an error.
        #
        # If no answer key is available, leave pre_tests_expanded_source blank
        assert self.pre_tests_expanded is not None
        assert self.post_tests_expanded is not None
        pre, post = self.pre_tests_expanded, self.post_tests_expanded

        useful_keys = list(self.answers_with_code())
        if useful_keys:
            ex_pre = pre.copy()
            ex_pre.expand_inputs(self.number_of_pre_expansions)
            ex_post = post.copy()
            ex_post.expand_inputs(self.number_of_post_expansions)
            pre_list = self.answers.expand_all(ex_pre)
            post_list = self.answers.expand_all(ex_post)

            if len(pre_list) == len(post_list) == 1:
                ex_pre = pre_list[0]
                ex_post = post_list[0]
            else:

                def validate(L, field):
                    first, *tail = L
                    for i, elem in enumerate(tail, 1):
                        if first == elem:
                            continue

                        lang1 = useful_keys[0].language
                        lang2 = useful_keys[i].language
                        first.language = lang1
                        elem.language = lang2
                        self.clear_tests()
                        raise validators.inconsistent_testcase_error(
                            first, elem, field)

                validate(pre_list, 'pre_tests_expanded_source')
                validate(post_list, 'post_tests_expanded_source')
                ex_pre, ex_post = pre_list[0], post_list[0]

            # Update values
            self.pre_tests_expanded = ex_pre
            self.pre_tests_expanded_source = ex_pre.source()
            self.post_tests_expanded = ex_post
            self.post_tests_expanded_source = ex_post.source()

    # Data access
    def get_placeholder(self, language=None):
        """
        Return the placeholder text for the given language.
        """

        key = self.answers.get(language or self.language, None)
        if key is None:
            return self.default_placeholder
        return key.placeholder

    def get_reference_source(self, language=None):
        """
        Return the reference source code for the given language or None, if no
        reference is found.
        """

        if language is None:
            language = self.language
        qs = self.answers.all().filter(
            language=get_programming_language(language))
        if qs:
            return qs.get().source
        return ''

    def get_submission_kwargs(self, request, kwargs):
        return dict(language=kwargs['language'], source=kwargs['source'])

    # Access answer key queryset
    def answers_with_code(self):
        """
        Filter only answers that define a program.
        """

        return self.answers.exclude(source='')

    def has_code_changed(self):
        """
        True if the source of any answer key that defines code has changed.
        """

        keys = self.answers_with_code()
        for key in keys:
            if key.has_changed_source():
                return True
        return False

    # Actions
    def submit(self, user_or_request, language=None, **kwargs):
        if language and self.language:
            if language != self.language:
                args = language, self.language
                raise ValueError('cannot set language: %r != %r' % args)
        if self.language:
            language = self.language
        language = get_programming_language(language)
        return super().submit(user_or_request, language=language, **kwargs)
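    # Illustrative (hypothetical values): for a question pinned to a single
    # language, submit() fills in the default and rejects mismatches:
    #
    #     question.submit(request, source=src)           # uses question.language
    #     question.submit(request, language=other_lang,  # ValueError if it
    #                     source=src)                    # differs from it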

    def run_post_grading(self, **kwargs):
        """
        Runs post tests for all submissions made to this question.
        """

        for response in self.responses.all():
            response.run_post_grading(tests=self.post_tests_expanded, **kwargs)
        self.closed = True
        self.save()

    def nav_section_for_activity(self, request):
        url = self.get_absolute_url
        section = NavSection(__('Question'),
                             url(),
                             title=__('Back to question'))
        if self.rules.test(request.user, 'activities.edit_activity'):
            section.add_link(__('Edit'),
                             self.get_admin_url(),
                             title=__('Edit question'))
        section.add_link(__('Submissions'),
                         url('submissions'),
                         title=__('View your submissions'))
        return section

    # Serving pages and routing
    template = 'questions/coding_io/detail.jinja2'
    template_submissions = 'questions/coding_io/submissions.jinja2'
    template_statistics = 'questions/coding_io/statistics.jinja2'
    template_debug = 'questions/coding_io/debug.jinja2'

    def get_context(self, request, *args, **kwargs):
        context = dict(super().get_context(request, *args, **kwargs),
                       form=True)

        # Select default mode for the ace editor
        if self.language:
            context['default_mode'] = self.language.ace_mode()
        else:
            context['default_mode'] = get_config('CODESCHOOL_DEFAULT_ACE_MODE',
                                                 'python')

        # Enable language selection
        if self.language is None:
            context['select_language'] = True
            context['languages'] = ProgrammingLanguage.supported.all()
        else:
            context['select_language'] = False

        return context

    def serve_ajax_submission(self,
                              client,
                              source=None,
                              language=None,
                              **kwargs):
        """
        Handles student responses via AJAX through the srvice protocol.
        """

        # User must choose language
        if not language or language == '-----':
            if self.language is None:
                fmt = _('Error'), _('Please select the correct language')
                client.dialog(
                    '<p class="dialog-text"><h2>%s</h2><p>%s</p></p>' % fmt)
                return None
            language = self.language
        else:
            language = get_programming_language(language)

        return super().serve_ajax_submission(
            client=client,
            language=language,
            source=source,
        )

    @srvice.route(r'^placeholder/$')
    def route_placeholder(self, request, language):
        """
        Return the placeholder code for some language.
        """

        return self.get_placeholder(language)

    #
    # Actions
    #
    def regrade_post(self):
        """
        Regrade all submissions using the post tests.
        """

        self.responses.regrade_with(self.post_tests_expanded)

    def action_expand_tests(self, client, *args, **kwargs):
        self._expand_tests()
        pre = escape(self.pre_tests_expanded_source)
        post = escape(self.post_tests_expanded_source)
        client.dialog('<h2>Pre-tests</h2><pre>%s</pre>'
                      '<h2>Post-tests</h2><pre>%s</pre>' % (pre, post))

    def action_grade_with_post_tests(self, client, *args, **kwargs):
        self.regrade_post()
        client.dialog('<p>Successful operation!</p>')
Example no. 12
class Submission(ResponseDataMixin, FeedbackDataMixin, models.CopyMixin,
                 models.StatusModel, models.TimeStampedModel,
                 models.PolymorphicModel):
    """
    Represents a student's simple submission in response to some activity.

    Submissions can be in five different states:

    pending:
        The response has been sent, but was not graded. Grading can be manual or
        automatic, depending on the activity.
    waiting:
        Waiting for manual feedback.
    incomplete:
        For long-term activities, this tells that the student started a response
        and is completing it gradually, but the final response was not achieved
        yet.
    invalid:
        The response has been sent, but contains malformed data.
    done:
        The response was graded and evaluated and it initialized a feedback
        object.

    A submission always starts in the pending status. We can request it to be
    graded by calling the :func:`Submission.autograde` method. This method must
    raise an InvalidSubmissionError if the submission is invalid, or a
    ManualGradingError if the subclass does not implement automatic grading.
    """
    class Meta:
        verbose_name = _('submission')
        verbose_name_plural = _('submissions')

    # Feedback messages
    MESSAGE_OK = _('*Congratulations!* Your response is correct!')
    MESSAGE_OK_WITH_PENALTIES = _(
        'Your response is correct, but you did not achieve the maximum grade.'
    )
    MESSAGE_WRONG = _('I\'m sorry, your response is wrong.')
    MESSAGE_PARTIAL = _(
        'Your answer is partially correct: you achieved only %(grade)d%% of '
        'the total grade.')
    MESSAGE_NOT_GRADED = _('Your response has not been graded yet!')

    # Status
    STATUS_PENDING = 'pending'
    STATUS_INCOMPLETE = 'incomplete'
    STATUS_WAITING = 'waiting'
    STATUS_INVALID = 'invalid'
    STATUS_DONE = 'done'

    # Fields
    STATUS = models.Choices(
        (STATUS_PENDING, _('pending')),
        (STATUS_INCOMPLETE, _('incomplete')),
        (STATUS_WAITING, _('waiting')),
        (STATUS_INVALID, _('invalid')),
        (STATUS_DONE, _('done')),
    )

    response = models.ParentalKey(
        'Response',
        related_name='submissions',
    )
    given_grade = models.DecimalField(
        _('percentage of maximum grade'),
        help_text=_(
            'This grade is given by the auto-grader and represents the grade '
            'for the response before accounting for any bonuses or penalties.'
        ),
        max_digits=6,
        decimal_places=3,
        blank=True,
        null=True,
    )
    final_grade = models.DecimalField(
        _('final grade'),
        help_text=_(
            'Similar to given_grade, but can account for additional factors '
            'such as delay penalties or for any other reason the teacher may '
            'want to override the student\'s grade.'),
        max_digits=6,
        decimal_places=3,
        blank=True,
        null=True,
    )
    manual_override = models.BooleanField(default=False)
    points = models.IntegerField(default=0)
    score = models.IntegerField(default=0)
    stars = models.FloatField(default=0)
    objects = SubmissionManager()

    # Status properties
    is_done = property(lambda x: x.status == x.STATUS_DONE)
    is_pending = property(lambda x: x.status == x.STATUS_PENDING)
    is_waiting = property(lambda x: x.status == x.STATUS_WAITING)
    is_invalid = property(lambda x: x.status == x.STATUS_INVALID)

    @property
    def is_correct(self):
        if self.given_grade is None:
            raise AttributeError('accessing attribute of a non-graded submission.')
        else:
            return self.given_grade == 100

    # Delegate properties
    activity = delegate_to('response')
    activity_id = delegate_to('response')
    activity_page = delegate_to('response')
    activity_page_id = delegate_to('response')
    user = delegate_to('response')
    user_id = delegate_to('response')
    stars_total = delegate_to('activity')
    points_total = delegate_to('activity')

    @classmethod
    def response_data_hash(cls, response_data):
        """
        Computes a hash for the response_data attribute.

        Data must be given as a JSON-like structure or as a string of JSON data.
        """

        if response_data:
            if isinstance(response_data, str):
                data = response_data
            else:
                data = json.dumps(response_data, default=json_default)
            return md5hash(data)
        return ''
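    # Illustrative: both calls below hash the same JSON payload, since dict
    # input is serialized with json.dumps() before hashing:
    #
    #     Submission.response_data_hash({'value': 42})
    #     Submission.response_data_hash('{"value": 42}')
    #
    # (Equality of the two hashes assumes json.dumps() renders the dict with
    # exactly the same key order and separators.)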

    def __init__(self, *args, **kwargs):
        # Django is loading the object from the database -- we step out of the way
        if args and not kwargs:
            super().__init__(*args, **kwargs)
            return

        # We create the response_data and feedback_data manually always using
        # copies of passed dicts. We save these variables here, init object and
        # then copy this data to the initialized dictionaries
        response_data = kwargs.pop('response_data', None) or {}
        feedback_data = kwargs.pop('feedback_data', None) or {}

        # This part makes a Submission instance initialize from a user +
        # activity instead of requiring a response object. The response is
        # automatically created on demand.
        user = kwargs.pop('user', None)
        if 'response' in kwargs and user and user != kwargs['response'].user:
            response_user = kwargs['response'].user
            raise ValueError('Inconsistent user definition: %s vs. %s' %
                             (user, response_user))
        elif 'response' not in kwargs and user:
            try:
                activity = kwargs.pop('activity')
            except KeyError:
                raise TypeError(
                    '%s objects bound to a user must also provide an '
                    'activity parameter.' % type(self).__name__)
            else:
                # User-bound constructor tries to obtain the response object by
                # searching for a specific (user, activity) tuple.
                response, created = Response.objects.get_or_create(
                    user=user, activity=activity)
                kwargs['response'] = response

        if 'context' in kwargs or 'activity' in kwargs:
            raise TypeError(
                'Must provide a user to instantiate a bound submission.')
        super().__init__(*args, **kwargs)

        # Now that we have initialized the submission, we fill the data
        # passed in the response_data and feedback_data dictionaries.
        self.response_data = dict(self.response_data or {}, **response_data)
        self.feedback_data = dict(self.feedback_data or {}, **feedback_data)

    def __str__(self):
        if self.given_grade is None:
            grade = self.status
        else:
            grade = '%s pts' % self.final_grade
        user = self.user
        activity = self.activity
        name = self.__class__.__name__
        return '<%s: %s by %s (%s)>' % (name, activity, user, grade)

    def __html__(self):
        """
        A string of html source representing the feedback.
        """

        if self.is_done:
            data = {'grade': (self.final_grade or 0)}

            if self.final_grade == 100:
                return markdown(self.MESSAGE_OK)
            elif self.given_grade == 100:
                return markdown(self.MESSAGE_OK_WITH_PENALTIES)
            elif not self.given_grade:
                return markdown(self.MESSAGE_WRONG)
            else:
                return markdown(self.MESSAGE_PARTIAL % data)
        else:
            return markdown(self.MESSAGE_NOT_GRADED)

    def save(self, *args, **kwargs):
        if not self.response_hash:
            self.response_hash = self.response_data_hash(self.response_data)
        super().save(*args, **kwargs)

    def final_points(self):
        """
        Return the amount of points awarded to the submission after
        considering all penalties and bonuses.
        """

        return self.points

    def final_stars(self):
        """
        Return the amount of stars awarded to the submission after
        considering all penalties and bonuses.
        """

        return self.stars

    def given_stars(self):
        """
        Compute the number of stars that should be awarded to the submission
        without taking into account bonuses and penalties.
        """

        return self.stars_total * (self.given_grade / 100)

    def given_points(self):
        """
        Compute the number of points that should be awarded to the submission
        without taking into account bonuses and penalties.
        """

        return int(self.points_total * (self.given_grade / 100))
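    # Worked example: with points_total == 20 and given_grade == 75, the
    # submission earns int(20 * 0.75) == 15 points; with stars_total == 5,
    # it earns 5 * 0.75 == 3.75 stars before bonuses and penalties.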

    def feedback(self, commit=True, force=False, silent=False):
        """
        Return the feedback object associated to the given response.

        This method may trigger the autograde() method, if grading was not
        performed yet. If you want to defer database access, call it with
        commit=False to prevent saving any modifications to the response object
        to the database.

        The commit, force and silent arguments have the same meaning as in
        the :func:`Submission.autograde` method.
        """

        if self.status == self.STATUS_PENDING:
            self.autograde(commit=commit, force=force, silent=silent)
        elif self.status == self.STATUS_INVALID:
            raise self.feedback_data
        elif self.status == self.STATUS_WAITING:
            return None
        return self.feedback_data

    def autograde(self, commit=True, force=False, silent=False):
        """
        Performs automatic grading.

        Submission subclasses must implement the autograde_value() method in
        order to make automatic grading work. This method may write any
        relevant information to the `feedback_data` attribute and must return
        a numeric value from 0 to 100 with the given automatic grade.

        Args:
            commit:
                If false, prevents saving the object when grading is complete.
                The user must save the object manually after calling this
                method.
            force:
                If true, force regrading the item even if it has already been
                graded. The default behavior is to ignore autograde from a
                graded submission.
            silent:
                Prevents the submission_graded_signal from triggering in the
                end of a successful grading.
        """

        if self.status == self.STATUS_PENDING or force:
            # Evaluate grade using the autograde_value() method of subclass.
            try:
                value = self.autograde_value()
            except self.InvalidSubmissionError as ex:
                self.status = self.STATUS_INVALID
                self.feedback_data = ex
                self.given_grade = self.final_grade = decimal.Decimal(0)
                if commit:
                    self.save()
                raise

            # If no value is returned, change to STATUS_WAITING. This probably
            # means that response is partial and we need other submissions to
            # complete the final response
            if value is None:
                self.status = self.STATUS_WAITING

            # A regular submission has a decimal grade value. We save it and
            # change state to STATUS_DONE
            else:
                self.given_grade = decimal.Decimal(value)
                if self.final_grade is None:
                    self.final_grade = self.given_grade
                self.status = self.STATUS_DONE

            # Commit results
            if commit and self.pk:
                self.save(update_fields=[
                    'status', 'feedback_data', 'given_grade', 'final_grade'
                ])
            elif commit:
                self.save()

            # If STATUS_DONE, we submit the submission_graded signal.
            if self.status == self.STATUS_DONE:
                self.stars = self.given_stars()
                self.points = self.given_points()
                self.response.register_submission(self)
                if not silent:
                    submission_graded_signal.send(
                        Submission,
                        submission=self,
                        given_grade=self.given_grade,
                        automatic=True,
                    )

        elif self.status == self.STATUS_INVALID:
            raise self.feedback_data

    def manual_grade(self, grade, commit=True, raises=False, silent=False):
        """
        Saves result of manual grading.

        Args:
            grade (number):
                Given grade, as a percentage value.
            commit:
                If false, prevents saving the object when grading is complete.
                The user must save the object manually after calling this
                method.
            raises:
                If submission has already been graded, raises a GradingError.
            silent:
                Prevents the submission_graded_signal from triggering in the
                end of a successful grading.
        """

        if self.status != self.STATUS_PENDING and raises:
            raise GradingError('Submission has already been graded!')

        raise NotImplementedError('TODO')

    def autograde_value(self):
        """
        This method should be implemented in subclasses.
        """

        raise ImproperlyConfigured(
            'Submission subclass %r must implement the autograde_value() '
            'method. It should perform the automatic grading and return the '
            'resulting grade. Any additional relevant feedback data may be '
            'saved to the `feedback_data` attribute, which is then pickled '
            'and saved into the database.' % type(self).__name__)
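    # Minimal subclass sketch (illustrative, not from the original code
    # base): grading by exact string comparison.
    #
    #     class TextSubmission(Submission):
    #         value = models.TextField()
    #         expected = models.TextField()
    #
    #         def autograde_value(self):
    #             # Must return a grade in the 0-100 range, as documented
    #             # in autograde().
    #             return (100 if self.value.strip() == self.expected.strip()
    #                     else 0)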

    def regrade(self, method, commit=True):
        """
        Recompute the grade for the given submission.

        If status != 'done', it simply calls the .autograde() method. Otherwise,
        it accepts different strategies for updating to the new grades:
            'update':
                Recompute the grades and replace the old values with the new
                ones. Only saves the submission if the feedback_data or the
                given_grade attributes change.
            'best':
                Only update if the grade increases.
            'worst':
                Only update if the grade decreases.
            'best-feedback':
                Like 'best', but updates feedback_data even when the grade
                does not improve.
            'worst-feedback':
                Like 'worst', but updates feedback_data even when the grade
                does not decrease.

        Return a boolean telling if the regrading was necessary.
        """
        if self.status != self.STATUS_DONE:
            return self.autograde()

        # We keep a copy of the state, if necessary. We only have to take some
        # action if the state changes.
        def rollback():
            self.__dict__.clear()
            self.__dict__.update(state)

        state = self.__dict__.copy()
        self.autograde(force=True, commit=False)

        # Each method deals with the new state in a different manner
        if method == 'update':
            if state != self.__dict__:
                if commit:
                    self.save()
                return True
            return False
        elif method in ('best', 'best-feedback'):
            if self.given_grade <= state.get('given_grade', 0):
                new_feedback_data = self.feedback_data
                rollback()
                if new_feedback_data != self.feedback_data:
                    self.feedback_data = new_feedback_data
                    if commit:
                        self.save()
                    return True
                return False
            elif commit:
                self.save()
            return True

        elif method in ('worst', 'worst-feedback'):
            if self.given_grade >= state.get('given_grade', 0):
                new_feedback_data = self.feedback_data
                rollback()
                if new_feedback_data != self.feedback_data:
                    self.feedback_data = new_feedback_data
                    if commit:
                        self.save()
                    return True
                return False
            elif commit:
                self.save()
            return True
        else:
            rollback()
            raise ValueError('invalid method: %s' % method)
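    # Strategy sketch (illustrative): regrade keeping only improvements and
    # defer the save to the caller:
    #
    #     changed = submission.regrade('best', commit=False)
    #     if changed:
    #         submission.save()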
Example no. 13
class Progress(models.CopyMixin, models.StatusModel, models.TimeStampedModel,
               models.PolymorphicModel):
    """
    When a user starts an activity, it opens a Progress object which controls
    all submissions to the given activity.

    The Progress object also manages individual submissions that may span
    several http requests.
    """
    class Meta:
        unique_together = [('user', 'activity_page')]
        verbose_name = _('student progress')
        verbose_name_plural = _('student progress list')

    STATUS_OPENED = 'opened'
    STATUS_CLOSED = 'closed'
    STATUS_INCOMPLETE = 'incomplete'
    STATUS_WAITING = 'waiting'
    STATUS_INVALID = 'invalid'
    STATUS_DONE = 'done'

    STATUS = models.Choices(
        (STATUS_OPENED, _('opened')),
        (STATUS_CLOSED, _('closed')),
    )

    user = models.ForeignKey(models.User, on_delete=models.CASCADE)
    activity_page = models.ForeignKey(models.Page, on_delete=models.CASCADE)
    final_grade_pc = models.DecimalField(
        _('final score'),
        max_digits=6,
        decimal_places=3,
        default=Decimal,
        help_text=_(
            'Final grade, considering all submissions, penalties, etc.'
        ),
    )
    given_grade_pc = models.DecimalField(
        _('grade'),
        max_digits=6,
        decimal_places=3,
        default=Decimal,
        help_text=_('Final grade before applying any modifier.'),
    )
    finished = models.DateTimeField(blank=True, null=True)
    best_submission = models.ForeignKey('Submission',
                                        blank=True,
                                        null=True,
                                        related_name='+')
    points = models.IntegerField(default=0)
    score = models.IntegerField(default=0)
    stars = models.FloatField(default=0.0)
    is_correct = models.BooleanField(default=bool)
    has_submissions = models.BooleanField(default=bool)
    has_feedback = models.BooleanField(default=bool)
    has_post_tests = models.BooleanField(default=bool)
    objects = ProgressManager()

    #: The number of submissions
    num_submissions = property(lambda x: x.submissions.count())

    #: Specific activity reference
    activity = property(lambda x: x.activity_page.specific)
    activity_id = property(lambda x: x.activity_page_id)

    #: Has progress mixin interface
    username = property(lambda x: x.user.username)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self)

    def __str__(self):
        tries = self.num_submissions
        user = self.user
        activity = self.activity
        grade = '%s pts' % (self.final_grade_pc or 0)
        fmt = '%s by %s (%s, %s tries)'
        return fmt % (activity, user, grade, tries)

    def __hash__(self):
        return hash(self.id)

    def __eq__(self, other):
        if isinstance(other, Progress):
            if self.pk is None:
                return False
            else:
                return self.pk == other.pk
        return NotImplemented

    def submit(self, request, recycle=True, **kwargs):
        """
        Creates a new submission.
        """

        submission_class = self.activity.submission_class
        submission = submission_class(progress=self, **kwargs)
        submission.ip_address = get_ip(request)

        if not recycle:
            submission.save()
            return submission

        # Collect all submissions with the same hash as current one
        recyclable = submission_class.objects\
            .filter(progress=self, hash=submission.compute_hash()) \
            .order_by('created')

        # Then check if any submission is actually equal to the current amongst
        # all candidates
        for possibly_equal in recyclable:
            if submission.is_equal(possibly_equal):
                possibly_equal.recycled = True
                possibly_equal.bump_recycles()
                return possibly_equal

        submission.save()
        return submission
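    # Illustrative: with recycling enabled, re-sending an identical
    # submission returns the original row instead of creating a new one
    # (assuming both the hash and the is_equal() comparison match):
    #
    #     s1 = progress.submit(request, source='x = 1')
    #     s2 = progress.submit(request, source='x = 1')
    #     assert s1.pk == s2.pk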

    def register_feedback(self, feedback):
        """
        This method is called after a submission is graded and produces a
        feedback.
        """

        submission = feedback.submission
        self.update_grades_from_feedback(feedback)

        if not self.activity.has_submissions:
            print('first submission')
            if feedback.is_correct:
                print('first correct submission')

    def update_grades_from_feedback(self, feedback):
        """
        Update grades of the current progress object from the given feedback.
        """

        # Update grades
        if self.given_grade_pc < (feedback.given_grade_pc or 0):
            self.given_grade_pc = feedback.given_grade_pc

        # TODO: decide better update strategy
        if self.final_grade_pc < feedback.final_grade_pc:
            self.final_grade_pc = feedback.final_grade_pc

        # # Register points and stars associated with submission.
        # score_kwargs = {}
        # final_points = feedback.final_points()
        # final_stars = feedback.final_stars()
        # if final_points > self.points:
        #     score_kwargs['points'] = final_points - self.points
        #     self.points = final_points
        # if final_stars > self.stars:
        #     score_kwargs['stars'] = final_stars - self.stars
        #     self.stars = final_stars
        #
        # # If some score has changed, we save the update fields and update the
        # # corresponding UserScore object
        # if score_kwargs:
        #     from codeschool.gamification.models import UserScore
        #     self.save(update_fields=score_kwargs.keys())
        #     score_kwargs['diff'] = True
        #     UserScore.update(self.user, self.activity_page, **score_kwargs)

        # Update the is_correct field
        self.is_correct = self.is_correct or feedback.is_correct
        self.save()

    def update_from_submissions(self,
                                grades=True,
                                score=True,
                                commit=True,
                                refresh=False):
        """
        Update grades and gamification scores for all submissions.

        Args:
            grades, score (bool):
                choose to update final grades and/or final scores.
            commit:
                if True (default), save changes to database.
            refresh:
                if True, recompute grades from scratch (default: False).
        """

        submissions = self.submissions.all()
        if refresh and submissions.count():
            first = submissions.first()
            if grades:
                self.final_grade_pc = first.given_grade_pc
                self.given_grade_pc = first.given_grade_pc
            if score:
                self.points = first.points
                self.stars = first.stars
                self.score = first.score

        for submission in submissions:
            if grades:
                submission.update_response_grades(commit=False)
            if score:
                submission.update_response_score(commit=False)

        if commit:
            self.save()

    def regrade(self, method=None, force_update=False):
        """
        Return the final grade for the user using the given method.

        If no method is given, it uses the default grading method for the
        activity.
        """

        activity = self.activity

        # Choose grading method
        if method is None and self.final_grade_pc is not None:
            return self.final_grade_pc
        elif method is None:
            grading_method = activity.grading_method
        else:
            grading_method = GradingMethod.from_name(activity.owner, method)

        # Grade response. We save the result to the final_grade_pc attribute if
        # no explicit grading method is given.
        grade = grading_method.grade(self)
        if method is None and (force_update or self.final_grade_pc is None):
            self.final_grade_pc = grade
        return grade
Example no. 14
class NumericResponse(Response):
    value = models.FloatField(
        _('Value'),
        help_text=_('Result (it must be a number)')
    )
Example no. 15
class Response(models.CopyMixin, models.StatusModel, models.TimeStampedModel,
               models.PolymorphicModel, models.ClusterableModel):
    """
    When a user starts an activity, it opens a Session object that controls
    how responses to the given activity will be submitted.

    The session object manages individual response submissions that may span
    several http requests.
    """
    class Meta:
        unique_together = [('user', 'activity_page')]
        verbose_name = _('final response')
        verbose_name_plural = _('final responses')

    STATUS_OPENED = 'opened'
    STATUS_CLOSED = 'closed'
    STATUS_INCOMPLETE = 'incomplete'
    STATUS_WAITING = 'waiting'
    STATUS_INVALID = 'invalid'
    STATUS_DONE = 'done'

    STATUS = models.Choices(
        (STATUS_OPENED, _('opened')),
        (STATUS_CLOSED, _('closed')),
    )

    user = models.ForeignKey(
        models.User,
        related_name='responses',
        on_delete=models.CASCADE,
    )
    activity_page = models.ForeignKey(
        models.Page,
        related_name='responses',
        on_delete=models.CASCADE,
    )
    grade = models.DecimalField(
        _('given grade'),
        max_digits=6,
        decimal_places=3,
        blank=True,
        null=True,
        default=0,
        help_text=_(
            'Grade given to response considering all submissions, penalties, '
            'etc.'),
    )
    finish_time = models.DateTimeField(
        blank=True,
        null=True,
    )
    points = models.IntegerField(default=0)
    score = models.IntegerField(default=0)
    stars = models.FloatField(default=0.0)
    is_finished = models.BooleanField(default=bool)
    is_correct = models.BooleanField(default=bool)
    objects = ResponseManager()

    #: The number of submissions in the current session.
    num_submissions = property(lambda x: x.submissions.count())

    #: Specific activity reference
    activity = property(lambda x: x.activity_page.specific)
    activity_id = property(lambda x: x.activity_page_id)

    @activity.setter
    def activity(self, value):
        self.activity_page = value.page_ptr

    @classmethod
    def _get_response(cls, user, activity):
        """
        Return the response object associated with the given
        user/activity.

        Create a new response object if it does not exist.
        """

        if user is None or activity is None:
            raise TypeError(
                'Response objects must be bound to both a user and an activity.')

        response, created = Response.objects.get_or_create(
            user=user, activity=activity)
        return response

    def __repr__(self):
        tries = self.num_submissions
        user = self.user
        activity = self.activity
        class_name = self.__class__.__name__
        grade = '%s pts' % (self.grade or 0)
        fmt = '<%s: %s by %s (%s, %s tries)>'
        return fmt % (class_name, activity, user, grade, tries)

    def __str__(self):
        return repr(self)

    def __hash__(self):
        return hash(self.id)

    def __eq__(self, other):
        if isinstance(other, Response):
            if self.pk is None:
                return False
            else:
                return self.pk == other.pk
        return NotImplemented

    def register_submission(self, submission):
        """
        This method is called when a submission is graded.
        """

        assert submission.response_id == self.id

        # Register points and stars associated with submission.
        score_kwargs = {}
        final_points = submission.final_points()
        final_stars = submission.final_stars()
        if final_points > self.points:
            score_kwargs['points'] = final_points - self.points
            self.points = final_points
        if final_stars > self.stars:
            score_kwargs['stars'] = final_stars - self.stars
            self.stars = final_stars

        # If some score has changed, we save the update fields and update the
        # corresponding UserScore object
        if score_kwargs:
            from codeschool.lms.gamification.models import UserScore
            self.save(update_fields=score_kwargs.keys())
            score_kwargs['diff'] = True
            UserScore.update(self.user, self.activity_page, **score_kwargs)

    def regrade(self, method=None, force_update=False):
        """
        Return the final grade for the user using the given method.

        If no method is given, it uses the default grading method for the
        activity.
        """

        activity = self.activity

        # Choose grading method
        if method is None and self.final_grade is not None:
            return self.final_grade
        elif method is None:
            grading_method = activity.grading_method
        else:
            grading_method = GradingMethod.from_name(activity.owner, method)

        # Grade response. We save the result to the final_grade attribute if
        # no explicit grading method is given.
        grade = grading_method.grade(self)
        if method is None and (force_update or self.final_grade is None):
            self.final_grade = grade
        return grade