Example #1
class Choice(models.Orderable):
    question = models.ParentalKey(MultipleChoiceQuestion,
                                  related_name='choices')
    text = models.RichTextField(_('Choice description'))
    uuid = models.UUIDField(default=uuid.uuid4)
    value = models.DecimalField(
        _('Value'), decimal_places=1, max_digits=4,
        validators=[validators.MinValueValidator(0),
                    validators.MaxValueValidator(100)],
        help_text=_(
            'Grade given to users that choose this option (value=0 for an '
            'incorrect choice and value=100 for a correct one).'
        ),
        default=0,
    )

    def __repr__(self):
        return 'Choice(value=%s, ...)' % self.value

    panels = [
        panels.FieldPanel('text'),
        panels.FieldPanel('value'),
    ]
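
A hedged usage sketch: because Choice uses a ParentalKey, instances can be
built in memory on the parent and committed in one save (this assumes
MultipleChoiceQuestion is a modelcluster ClusterableModel; the 'title' field
is illustrative):

question = MultipleChoiceQuestion(title='2 + 2 = ?')
question.choices = [
    Choice(text='4', value=100),  # correct choice: full grade
    Choice(text='5', value=0),    # incorrect choice: no grade
]
question.save()  # saves the parent together with its Choice children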
Example #2
class AnswerKey(models.Model):
    """
    Represents an answer to a question, given in a specific programming
    language, plus the placeholder text that should be displayed.
    """

    NULL_SOURCE_HASH = md5hash('')

    class ValidationError(Exception):
        pass

    class Meta:
        verbose_name = _('answer key')
        verbose_name_plural = _('answer keys')
        unique_together = [('question', 'language')]

    question = models.ParentalKey(
        CodingIoQuestion,
        related_name='answers'
    )
    language = models.ForeignKey(
        ProgrammingLanguage,
        related_name='+',
    )
    source = models.TextField(
        _('answer source code'),
        blank=True,
        help_text=_(
            'Source code for the correct answer in the given programming '
            'language.'
        ),
    )
    placeholder = models.TextField(
        _('placeholder source code'),
        blank=True,
        help_text=_(
            'This optional field controls which code should be placed in '
            'the source code editor when a question is opened. This is '
            'useful to put boilerplate or even a full program that the '
            'student should modify. It is possible to configure a global '
            'per-language boilerplate and leave this field blank.'
        ),
    )
    source_hash = models.CharField(
        max_length=32,
        default=NULL_SOURCE_HASH,
        help_text=_('Hash computed from the reference source'),
    )
    error_message = models.TextField(
        _('error message'),
        blank=True,
        help_text=_(
            'If an error is found on post-validation, an error message is '
            'stored in here.'
        )
    )

    def __repr__(self):
        return '<AnswerKey: %s>' % self

    def __str__(self):
        try:
            title = self.question.title
        except Exception:
            title = '<untitled>'
        return '%s (%s)' % (title, self.language)

    def save(self, *args, **kwargs):
        self.source_hash = md5hash(self.source)
        super().save(*args, **kwargs)

    def clean(self):
        try:
            check_syntax(self.source, lang=self.language.ejudge_ref())
        except SyntaxError as ex:
            msg = _('Invalid syntax: %(msg)s') % {'msg': str(ex)}
            raise ValidationError({'source': msg})
        super().clean()

        # Validation is async:
        #
        # We first run basic validations in the foreground and later attempt
        # more detailed validations that require us to run source code (and
        # thus possibly wait a long time).
        #
        # If this later validation step encounters errors, it saves them on
        # the model instance. The next time the model is validated, we can
        # re-raise them in the interface. The user has an option to bypass
        # these checks.
        # Changing the code or the iospec entries should expire these
        # errors.
        if self.error_message and not self.is_ignoring_validation_errors():
            raise ValidationError({'source': mark_safe(self.error_message)})

    def is_ignoring_validation_errors(self):
        """
        True to ignore errors found in post-validation.
        """

        return self.question.ignore_validation_errors

    def set_error_message(self, message):
        """
        Saves error message.
        """

        try:
            self.error_message = message.__html__()
        except AttributeError:
            self.error_message = escape(message)

    def has_changed_source(self):
        """
        Return True if source is not consistent with its hash.
        """

        return self.source_hash != md5hash(self.source)

    def single_reference(self):
        """
        Return True if it is the only answer key in the set that defines a
        source attribute.
        """

        if not self.source:
            return False

        try:
            return self.question.answers.has_program().get() == self
        except (self.DoesNotExist, self.MultipleObjectsReturned):
            return False

    # Wagtail admin
    panels = [
        panels.FieldPanel('language'),
        panels.FieldPanel('source'),
        panels.FieldPanel('placeholder'),
    ]
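
The md5hash() helper is external to this snippet. A minimal sketch of what it
presumably does, assuming it maps a unicode string to a 32-character hex
digest (which matches max_length=32 on source_hash):

import hashlib

def md5hash(text):
    # Hex MD5 digest of a unicode string: 32 characters, matching the
    # max_length=32 of the hash fields above.
    return hashlib.md5(text.encode('utf8')).hexdigest()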
Example #3
class AnswerKey(models.Model):
    """
    Represents an answer to a question, given in a specific programming
    language, plus the placeholder text that should be displayed.
    """
    class ValidationError(Exception):
        pass

    class Meta:
        verbose_name = _('answer key')
        verbose_name_plural = _('answer keys')
        unique_together = [('question', 'language')]

    question = models.ParentalKey(CodingIoQuestion, related_name='answers')
    language = models.ForeignKey(
        ProgrammingLanguage,
        related_name='+',
    )
    source = models.TextField(
        _('answer source code'),
        blank=True,
        help_text=_(
            'Source code for the correct answer in the given programming '
            'language.'),
    )
    placeholder = models.TextField(
        _('placeholder source code'),
        blank=True,
        help_text=_(
            'This optional field controls which code should be placed in '
            'the source code editor when a question is opened. This is '
            'useful to put boilerplate or even a full program that the '
            'student should modify. It is possible to configure a global '
            'per-language boilerplate and leave this field blank.'),
    )
    source_hash = models.CharField(
        max_length=32,
        blank=True,
        help_text=_('Hash computed from the reference source'),
    )
    iospec_hash = models.CharField(
        max_length=32,
        blank=True,
        help_text=_('Hash computed from reference source and iospec_size.'),
    )
    iospec_source = models.TextField(
        _('expanded source'),
        blank=True,
        help_text=_(
            'Iospec source for the expanded testcase. This data is computed '
            'from the reference iospec source and the given reference program '
            'to expand the outputs from the given inputs.'))

    objects = AnswerKeyQueryset.as_manager()
    iospec_size = property(lambda x: x.question.iospec_size)

    @lazy
    def iospec(self):
        return parse_iospec(self.iospec_source)

    def __repr__(self):
        return '<AnswerKey: %s (%s)>' % (self.question, self.language)

    def __str__(self):
        return '%s (%s)' % (self.question, self.language)

    def clean(self):
        super().clean()

        if self.question is None:
            return

        # We only have to update if the parent's hash is incompatible with the
        # current hash and the source field is defined. This test ensures the
        # expensive code re-evaluation runs only when strictly necessary.
        parent_hash = self.parent_hash()
        source_hash = md5hash(self.source)

        if parent_hash != self.iospec_hash or source_hash != self.source_hash:
            try:
                iospec = self.question.iospec
            except Exception:
                raise ValidationError(
                    _('cannot register answer key for question with invalid '
                      'iospec.'))
            result = self._update_state(iospec, self.source, self.language)
            self.iospec_source = result.source()
            self.source_hash = source_hash
            self.iospec_hash = parent_hash

    def update(self, commit=True):
        """
        Update the internal iospec source and hash keys to match the given
        parent iospec value.

        It raises a ValidationError if the source code is invalid.
        """

        iospec = self.question.iospec
        result = self._update_state(iospec, self.source, self.language)
        self.iospec_source = result.source()
        self.source_hash = md5hash(self.source)
        self.iospec_hash = self.parent_hash()
        if commit:
            self.save()

    def _update_state(self, iospec, source, language):
        """
        Worker function for the .update() and .clean() methods.

        Update the hashes and the expanded iospec_source for the answer key.
        """

        # We expand inputs and compute the result for the given source code
        # string
        language = language.ejudge_ref()
        if len(iospec) <= self.iospec_size:
            iospec.expand_inputs(self.iospec_size)
        result = run_code(source, iospec, language)

        # Check if the result has runtime or build errors
        if result.has_errors:
            for testcase in iospec:
                result = run_code(source, testcase, language)
                if result.has_errors:
                    error_dic = {
                        'error': escape(result.get_error_message()),
                        'iospec': escape(testcase.source())
                    }
                    raise ValidationError(
                        {'source': mark_safe(ERROR_TEMPLATE % error_dic)})

        # The source may run fine, but still give results that are inconsistent
        # with the given testcases. This will only be noticed if the user
        # provides at least one simple IO test case.
        for (expected, value) in zip(iospec, result):
            expected_source = expected.source().rstrip()
            value_source = value.source().rstrip()
            if expected.is_simple and expected_source != value_source:
                msg = _(
                    '<div class="error-message">'
                    'Your program produced invalid results in this test case:\n'
                    '<br>\n'
                    '<pre>%(diff)s</pre>\n'
                    '</div>')
                error = {
                    'diff': '\n'.join(differ.compare(
                        expected_source.splitlines(),
                        value_source.splitlines())),
                }
                msg = mark_safe(msg % error)
                raise ValidationError({'source': msg})

        # Return the result since it carries all the computed expansions
        return result

    def save(self, *args, **kwds):
        if 'iospec' in self.__dict__:
            self.iospec_source = self.iospec.source()
        super().save(*args, **kwds)

    def run(self, source=None, iospec=None):
        """
        Runs the given source against the given iospec.

        If no source is given, use the reference implementation.

        If no iospec is given, use the default. The caller may also pass a
        list of input strings.
        """

        source = source or self.source
        iospec = iospec or self.iospec
        if not source:
            raise ValueError('a source code string must be provided.')

        return run_code(source, iospec, self.language.ejudge_ref())

    def parent_hash(self):
        """
        Return the iospec hash from the question current iospec/iospec_size.
        """

        parent = self.question
        return md5hash(parent.iospec_source + str(parent.iospec_size))

    # Wagtail admin
    panels = [
        panels.FieldPanel('language'),
        panels.FieldPanel('source'),
        panels.FieldPanel('placeholder'),
    ]
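
A hedged usage sketch of the change-detection flow above: clean() re-runs the
(expensive) reference program only when the source or the parent iospec hash
has drifted (question and python3 are illustrative objects):

answer = AnswerKey(question=question, language=python3,
                   source='print(sum(map(int, input().split())))')
answer.full_clean()  # clean() -> _update_state() -> run_code(); hashes updated
answer.save()
answer.full_clean()  # cheap no-op: source_hash and iospec_hash still match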
Example #4
class FormField(AbstractFormField):
    page = models.ParentalKey('FormPage', related_name='form_fields')
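
For context, the 'FormPage' referenced by the ParentalKey is typically a
Wagtail form-builder page that exposes these fields through an InlinePanel.
A sketch of the conventional pairing (import paths vary across Wagtail
versions; this is an assumption, not code from this project):

from wagtail.contrib.forms.models import AbstractForm
from wagtail.admin.edit_handlers import InlinePanel

class FormPage(AbstractForm):
    content_panels = AbstractForm.content_panels + [
        InlinePanel('form_fields', label='Form fields'),
    ]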
Example #5
class Submission(ResponseDataMixin, FeedbackDataMixin, models.CopyMixin,
                 models.StatusModel, models.TimeStampedModel,
                 models.PolymorphicModel):
    """
    Represents a student's simple submission in response to some activity.

    Submissions can be in 5 different states:

    pending:
        The response has been sent, but was not graded. Grading can be manual or
        automatic, depending on the activity.
    waiting:
        Waiting for manual feedback.
    incomplete:
        For long-term activities, this tells that the student has started a
        response and is completing it gradually, but has not submitted the
        final version yet.
    invalid:
        The response has been sent, but contains malformed data.
    done:
        The response was graded and evaluated, and a feedback object was
        created.

    A response always starts in the pending status. We can request it to be
    graded by calling the :func:`Submission.autograde` method. This method must
    raise an InvalidSubmissionError if the response is invalid or a
    ManualGradingError if the subclass does not implement automatic grading.
    """
    class Meta:
        verbose_name = _('submission')
        verbose_name_plural = _('submissions')

    # Feedback messages
    MESSAGE_OK = _('*Congratulations!* Your response is correct!')
    MESSAGE_OK_WITH_PENALTIES = _(
        'Your response is correct, but you did not achieve the maximum grade.'
    )
    MESSAGE_WRONG = _('I\'m sorry, your response is wrong.')
    MESSAGE_PARTIAL = _(
        'Your answer is partially correct: you achieved only %(grade)d%% of '
        'the total grade.')
    MESSAGE_NOT_GRADED = _('Your response has not been graded yet!')

    # Status
    STATUS_PENDING = 'pending'
    STATUS_INCOMPLETE = 'incomplete'
    STATUS_WAITING = 'waiting'
    STATUS_INVALID = 'invalid'
    STATUS_DONE = 'done'

    # Fields
    STATUS = models.Choices(
        (STATUS_PENDING, _('pending')),
        (STATUS_INCOMPLETE, _('incomplete')),
        (STATUS_WAITING, _('waiting')),
        (STATUS_INVALID, _('invalid')),
        (STATUS_DONE, _('done')),
    )

    response = models.ParentalKey(
        'Response',
        related_name='submissions',
    )
    given_grade = models.DecimalField(
        _('percentage of maximum grade'),
        help_text=_(
            'This grade is given by the auto-grader and represents the grade '
            'for the response before accounting for any bonuses or penalties.'
        ),
        max_digits=6,
        decimal_places=3,
        blank=True,
        null=True,
    )
    final_grade = models.DecimalField(
        _('final grade'),
        help_text=_(
            'Similar to given_grade, but can account for additional factors '
            'such as delay penalties or for any other reason the teacher may '
            'want to override the student\'s grade.'),
        max_digits=6,
        decimal_places=3,
        blank=True,
        null=True,
    )
    manual_override = models.BooleanField(default=False)
    points = models.IntegerField(default=0)
    score = models.IntegerField(default=0)
    stars = models.FloatField(default=0)
    objects = SubmissionManager()

    # Status properties
    is_done = property(lambda x: x.status == x.STATUS_DONE)
    is_pending = property(lambda x: x.status == x.STATUS_PENDING)
    is_waiting = property(lambda x: x.status == x.STATUS_WAITING)
    is_invalid = property(lambda x: x.status == x.STATUS_INVALID)

    @property
    def is_correct(self):
        if self.given_grade is None:
            raise AttributeError('accessing attribute of non-graded response.')
        else:
            return self.given_grade == 100

    # Delegate properties
    activity = delegate_to('response')
    activity_id = delegate_to('response')
    activity_page = delegate_to('response')
    activity_page_id = delegate_to('response')
    user = delegate_to('response')
    user_id = delegate_to('response')
    stars_total = delegate_to('activity')
    points_total = delegate_to('activity')

    @classmethod
    def response_data_hash(cls, response_data):
        """
        Computes a hash for the response_data attribute.

        Data must be given as a JSON-like structure or as a string of JSON data.
        """

        if response_data:
            if isinstance(response_data, str):
                data = response_data
            else:
                data = json.dumps(response_data, default=json_default)
            return md5hash(data)
        return ''

    def __init__(self, *args, **kwargs):
        # Django is loading object from the database -- we step out of the way
        if args and not kwargs:
            super().__init__(*args, **kwargs)
            return

        # We create the response_data and feedback_data manually always using
        # copies of passed dicts. We save these variables here, init object and
        # then copy this data to the initialized dictionaries
        response_data = kwargs.pop('response_data', None) or {}
        feedback_data = kwargs.pop('feedback_data', None) or {}

        # This part makes a Submission instance initialize from a user +
        # activity instead of requiring a response object. The response is
        # automatically created on demand.
        user = kwargs.pop('user', None)
        if 'response' in kwargs and user and user != kwargs['response'].user:
            response_user = kwargs['response'].user
            raise ValueError('Inconsistent user definition: %s vs. %s' %
                             (user, response_user))
        elif 'response' not in kwargs and user:
            try:
                activity = kwargs.pop('activity')
            except KeyError:
                raise TypeError(
                    '%s objects bound to a user must also provide an '
                    'activity parameter.' % type(self).__name__)
            else:
                # User-bound constructor tries to obtain the response object by
                # searching for a specific (user, activity) tuple.
                response, created = Response.objects.get_or_create(
                    user=user, activity=activity)
                kwargs['response'] = response

        if 'context' in kwargs or 'activity' in kwargs:
            raise TypeError(
                'Must provide a user to instantiate a bound submission.')
        super().__init__(*args, **kwargs)

        # Now that we have initialized the submission, we fill the data
        # passed in the response_data and feedback_data dictionaries.
        self.response_data = dict(self.response_data or {}, **response_data)
        self.feedback_data = dict(self.feedback_data or {}, **feedback_data)

    def __str__(self):
        if self.given_grade is None:
            grade = self.status
        else:
            grade = '%s pts' % self.final_grade
        user = self.user
        activity = self.activity
        name = self.__class__.__name__
        return '<%s: %s by %s (%s)>' % (name, activity, user, grade)

    def __html__(self):
        """
        Return a string of HTML source representing the feedback.
        """

        if self.is_done:
            data = {'grade': (self.final_grade or 0)}

            if self.final_grade == 100:
                return markdown(self.MESSAGE_OK)
            elif self.given_grade == 100:
                return markdown(self.MESSAGE_OK_WITH_PENALTIES)
            elif not self.given_grade:
                return markdown(self.MESSAGE_WRONG)
            else:
                return markdown(self.MESSAGE_PARTIAL % data)
        else:
            return markdown(self.MESSAGE_NOT_GRADED)

    def save(self, *args, **kwargs):
        if not self.response_hash:
            self.response_hash = self.response_data_hash(self.response_data)
        super().save(*args, **kwargs)

    def final_points(self):
        """
        Return the number of points awarded to the submission after
        considering all penalties and bonuses.
        """

        return self.points

    def final_stars(self):
        """
        Return the number of stars awarded to the submission after
        considering all penalties and bonuses.
        """

        return self.stars

    def given_stars(self):
        """
        Compute the number of stars that should be awarded to the submission
        without taking into account bonuses and penalties.
        """

        return self.stars_total * (self.given_grade / 100)

    def given_points(self):
        """
        Compute the number of points that should be awarded to the submission
        without taking into account bonuses and penalties.
        """

        return int(self.points_total * (self.given_grade / 100))

    def feedback(self, commit=True, force=False, silent=False):
        """
        Return the feedback object associated with this submission.

        This method may trigger the autograde() method, if grading was not
        performed yet. If you want to defer database access, call it with
        commit=False to prevent saving any modifications to the response object
        to the database.

        The commit, force and silent arguments have the same meaning as in
        the :func:`Submission.autograde` method.
        """

        if self.status == self.STATUS_PENDING:
            self.autograde(commit=commit, force=force, silent=silent)
        elif self.status == self.STATUS_INVALID:
            raise self.feedback_data
        elif self.status == self.STATUS_WAITING:
            return None
        return self.feedback_data

    def autograde(self, commit=True, force=False, silent=False):
        """
        Performs automatic grading.

        Subclasses must implement the autograde_value() method in order to
        make automatic grading work. This method may write any relevant
        information to the `feedback_data` attribute and must return a
        numeric value from 0 to 100 with the given automatic grade.

        Args:
            commit:
                If false, prevents saving the object when grading is complete.
                The user must save the object manually after calling this
                method.
            force:
                If true, force regrading the item even if it has already been
                graded. The default behavior is to skip autograding for an
                already graded submission.
            silent:
                Prevents the submission_graded_signal from triggering at the
                end of a successful grading.
        """

        if self.status == self.STATUS_PENDING or force:
            # Evaluate grade using the autograde_value() method of subclass.
            try:
                value = self.autograde_value()
            except self.InvalidSubmissionError as ex:
                self.status = self.STATUS_INVALID
                self.feedback_data = ex
                self.given_grade = self.final_grade = decimal.Decimal(0)
                if commit:
                    self.save()
                raise

            # If no value is returned, change to STATUS_WAITING. This probably
            # means that the response is partial and we need other submissions
            # to complete the final response
            if value is None:
                self.status = self.STATUS_WAITING

            # A regular submission has a decimal grade value. We save it and
            # change state to STATUS_DONE
            else:
                self.given_grade = decimal.Decimal(value)
                if self.final_grade is None:
                    self.final_grade = self.given_grade
                self.status = self.STATUS_DONE

            # Commit results
            if commit and self.pk:
                self.save(update_fields=[
                    'status', 'feedback_data', 'given_grade', 'final_grade'
                ])
            elif commit:
                self.save()

            # If STATUS_DONE, we send the submission_graded signal.
            if self.status == self.STATUS_DONE:
                self.stars = self.given_stars()
                self.points = self.given_points()
                self.response.register_submission(self)
                if not silent:
                    submission_graded_signal.send(
                        Submission,
                        submission=self,
                        given_grade=self.given_grade,
                        automatic=True,
                    )

        elif self.status == self.STATUS_INVALID:
            raise self.feedback_data

    def manual_grade(self, grade, commit=True, raises=False, silent=False):
        """
        Saves result of manual grading.

        Args:
            grade (number):
                Given grade, as a percentage value.
            commit:
                If false, prevents saving the object when grading is complete.
                The user must save the object manually after calling this
                method.
            raises:
                If submission has already been graded, raises a GradingError.
            silent:
                Prevents the submission_graded_signal from triggering at the
                end of a successful grading.
        """

        if self.status != self.STATUS_PENDING and raises:
            raise GradingError('Submission has already been graded!')

        raise NotImplementedError('TODO')

    def autograde_value(self):
        """
        This method should be implemented in subclasses.
        """

        raise ImproperlyConfigured(
            'Response subclass %r must implement the autograde_value() '
            'method. This method should perform the automatic grading and '
            'return the resulting grade. Any additional relevant feedback '
            'data might be saved to the `feedback_data` attribute, which is '
            'then pickled and saved into the database.' % type(self).__name__)

    def regrade(self, method, commit=True):
        """
        Recompute the grade for the given submission.

        If status != 'done', it simply calls the .autograde() method.
        Otherwise, it accepts different strategies for updating to the new
        grades:
            'update':
                Recompute the grades and replace the old values with the new
                ones. Only saves the submission if the feedback_data or the
                given_grade attributes change.
            'best':
                Only update if the grade increases.
            'worst':
                Only update if the grade decreases.
            'best-feedback':
                Like 'best', but updates feedback_data even if the grades do
                not improve.
            'worst-feedback':
                Like 'worst', but updates feedback_data even if the grades do
                not decrease.

        Return a boolean telling whether regrading changed the submission.
        """
        if self.status != self.STATUS_DONE:
            self.autograde(commit=commit)
            return True

        # We keep a copy of the state, if necessary. We only have to take some
        # action if the state changes.
        def rollback():
            self.__dict__.clear()
            self.__dict__.update(state)

        state = self.__dict__.copy()
        self.autograde(force=True, commit=False)

        # Each method deals with the new state in a different manner
        if method == 'update':
            if state != self.__dict__:
                if commit:
                    self.save()
                return True
            return False
        elif method in ('best', 'best-feedback'):
            if self.given_grade <= state.get('given_grade', 0):
                new_feedback_data = self.feedback_data
                rollback()
                if new_feedback_data != self.feedback_data:
                    self.feedback_data = new_feedback_data
                    if commit:
                        self.save()
                    return True
                return False
            elif commit:
                self.save()
            return True

        elif method in ('worst', 'worst-feedback'):
            if self.given_grade >= state.get('given_grade', 0):
                new_feedback_data = self.feedback_data
                rollback()
                if new_feedback_data != self.feedback_data:
                    self.feedback_data = new_feedback_data
                    if commit:
                        self.save()
                    return True
                return False
            elif commit:
                self.save()
            return True
        else:
            rollback()
            raise ValueError('invalid method: %s' % method)
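
The delegate_to() helper behind the delegate properties is not shown here.
A plausible minimal version using the descriptor protocol (an assumption
about its behavior, not the project's actual implementation):

class delegate_to:
    """Descriptor that forwards an attribute lookup to a related object."""

    def __init__(self, delegate):
        self.delegate = delegate

    def __set_name__(self, owner, name):
        # Picks up the attribute name, e.g. 'activity' in
        # activity = delegate_to('response')
        self.name = name

    def __get__(self, obj, cls=None):
        if obj is None:
            return self
        # e.g. submission.activity -> submission.response.activity
        return getattr(getattr(obj, self.delegate), self.name)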
Example #6
class ResponseItem(models.CopyMixin,
                   models.TimeStampedStatusModel,
                   models.PolymorphicModel):
    """
    Represents a student's response to some activity.

    Response objects have 5 different states:

    pending:
        The response has been sent, but was not graded. Grading can be manual or
        automatic, depending on the activity.
    waiting:
        Waiting for manual feedback.
    incomplete:
        For long-term activities, this tells that the student has started a
        response and is completing it gradually, but has not submitted the
        final version yet.
    invalid:
        The response has been sent, but contains malformed data.
    done:
        The response was graded and evaluated, and a feedback object was
        created.

    A response always starts in the pending status. We can request it to be
    graded by calling the :func:`ResponseItem.autograde` method. This method
    must raise an InvalidResponseError if the response is invalid or a
    ManualGradingError if the subclass does not implement automatic grading.
    """

    class Meta:
        verbose_name = _('response')
        verbose_name_plural = _('responses')

    STATUS_PENDING = 'pending'
    STATUS_INCOMPLETE = 'incomplete'
    STATUS_WAITING = 'waiting'
    STATUS_INVALID = 'invalid'
    STATUS_DONE = 'done'
    STATUS = models.Choices(
        (STATUS_PENDING, _('pending')),
        (STATUS_INCOMPLETE, _('incomplete')),
        (STATUS_WAITING, _('waiting')),
        (STATUS_INVALID, _('invalid')),
        (STATUS_DONE, _('done')),
    )

    response = models.ParentalKey(
        'Response',
        verbose_name=_('response'),
        related_name='items',
    )
    feedback_data = models.JSONField(
        null=True,
        blank=True,
    )
    response_data = models.JSONField(
        null=True,
        blank=True,
    )
    response_hash = models.CharField(
        max_length=32,
        blank=True,
    )
    given_grade = models.DecimalField(
        _('Percentage of maximum grade'),
        help_text=_(
            'This grade is given by the auto-grader and represents the grade '
            'for the response before accounting for any bonuses or penalties.'),
        max_digits=6,
        decimal_places=3,
        blank=True,
        null=True,
    )
    final_grade = models.DecimalField(
        _('Final grade'),
        help_text=_(
            'Similar to given_grade, but can account for additional factors '
            'such as delay penalties or for any other reason the teacher may '
            'want to override the student\'s grade.'),
        max_digits=6,
        decimal_places=3,
        blank=True,
        null=True,
    )
    manual_override = models.BooleanField(
        default=False
    )

    # Status properties
    is_done = property(lambda x: x.status == x.STATUS_DONE)
    is_pending = property(lambda x: x.status == x.STATUS_PENDING)
    is_waiting = property(lambda x: x.status == x.STATUS_WAITING)
    is_invalid = property(lambda x: x.status == x.STATUS_INVALID)

    # Delegate properties
    activity = property(lambda x: x.response.activity.specific)
    user = property(lambda x: x.response.user)
    context = property(lambda x: x.response.context)
    course = property(lambda x: x.activity.course)

    def __init__(self, *args, **kwargs):
        # Django is loading object from the database -- we step out of the way
        if args and not kwargs:
            super().__init__(*args, **kwargs)
            return

        # We create the response_data and feedback_data manually always using
        # copies of passed dicts. We save these variables here, init object and
        # then copy this data to the initialized dictionaries
        response_data = kwargs.pop('response_data', None) or {}
        feedback_data = kwargs.pop('feedback_data', None) or {}

        # This part makes a ResponseItem instance initialize from a user +
        # activity + context instead of requiring a response object. The
        # response is automatically created on demand.
        user = kwargs.pop('user', None)
        if user:
            context = kwargs.pop('context', None)
            try:
                activity = kwargs.pop('activity')
            except KeyError:
                raise TypeError(
                    'ResponseItem objects bound to a user must also provide an '
                    'activity parameter.'
                )

            # User-bound constructor tries to obtain the Response object by
            # searching for a specific (user, context, activity) tuple.
            response, created = Response.objects.get_or_create(
                user=user,
                context=context,
                activity=activity
            )
            kwargs['response'] = response

        if 'context' in kwargs or 'activity' in kwargs:
            raise TypeError(
                'Must provide a user to instantiate a bound response item.'
            )
        super().__init__(*args, **kwargs)

        # Now that we have initialized the response item, we fill the data
        # passed in the response_data and feedback_data dictionaries.
        self.response_data = dict(self.response_data or {}, **response_data)
        self.feedback_data = dict(self.feedback_data or {}, **feedback_data)

    def __str__(self):
        if self.given_grade is None:
            grade = self.status
        else:
            grade = '%s pts' % self.final_grade
        user = self.user
        activity = self.activity
        return '<ResponseItem: %s by %s (%s)>' % (activity, user, grade)

    def save(self, *args, **kwargs):
        if not self.response_hash:
            self.response_hash = self.get_response_hash(self.response_data)
        super().save(*args, **kwargs)

    def get_feedback_data(self, commit=True):
        """Return the feedback object associated to the given response.

        This method may trigger the autograde() method, if grading was not
        performed yet. If you want to defer database access, call it with
        commit=False to prevent saving any modifications to the response object
        to the database.
        """

        if self.status == self.STATUS_PENDING:
            self.autograde(commit)
        elif self.status == self.STATUS_INVALID:
            raise self.feedback_data
        elif self.status == self.STATUS_WAITING:
            return None
        return self.feedback_data

    def autograde(self, commit=True, force=False, silent=False):
        """
        Performs automatic grading.

        Response subclasses must implement the autograde_compute() method in
        order to make automatic grading work. This method may write any
        relevant information to the `feedback_data` attribute and must return
        a numeric value from 0 to 100 with the given automatic grade.

        Args:
            commit:
                If false, prevents saving the object when grading is complete.
                The user must save the object manually after calling this
                method.
            force:
                If true, force regrading the item even if it has already been
                graded.
            silent:
                Prevents the autograde_signal from triggering at the end of
                a successful autograde.
        """

        if self.status == self.STATUS_PENDING or force:
            try:
                value = self.autograde_compute()
            except self.InvalidResponseError as ex:
                self.status = self.STATUS_INVALID
                self.feedback_data = ex
                self.given_grade = self.final_grade = decimal.Decimal(0)
                if commit:
                    self.save()
                raise

            if value is None:
                self.status = self.STATUS_WAITING
            else:
                self.given_grade = decimal.Decimal(value)
                if self.final_grade is None:
                    self.final_grade = self.given_grade
                self.status = self.STATUS_DONE
                if not silent:
                    autograde_signal.send(
                        self.__class__,
                        response_item=self,
                        given_grade=self.given_grade
                    )
            if commit and self.pk:
                self.save(update_fields=['status', 'feedback_data',
                                         'given_grade', 'final_grade'])
            elif commit:
                self.save()

        elif self.status == self.STATUS_INVALID:
            raise self.feedback_data

    def autograde_compute(self):
        """This method should be implemented in subclasses."""

        raise ImproperlyConfigured(
            'Response subclass %r must implement the autograde_compute() '
            'method. This method should perform the automatic grading and '
            'return the resulting grade. Any additional relevant feedback '
            'data might be saved to the `feedback_data` attribute, which is '
            'then pickled and saved into the database.' % type(self).__name__
        )

    def regrade(self, method, commit=True):
        """
        Recompute the grade for the given response item.

        If status != 'done', it simply calls the .autograde() method.
        Otherwise, it accepts different strategies for updating to the new
        grades:
            'update':
                Recompute the grades and replace the old values with the new
                ones. Only saves the response item if the feedback_data or
                the given_grade attributes change.
            'best':
                Only update if the grade increases.
            'worst':
                Only update if the grade decreases.
            'best-feedback':
                Like 'best', but updates feedback_data even if the grades do
                not improve.
            'worst-feedback':
                Like 'worst', but updates feedback_data even if the grades do
                not decrease.

        Return a boolean telling whether regrading changed the response item.
        """
        if self.status != self.STATUS_DONE:
            self.autograde(commit=commit)
            return True

        # We keep a copy of the state, if necessary. We only have to take some
        # action if the state changes.
        def rollback():
            self.__dict__.clear()
            self.__dict__.update(state)

        state = self.__dict__.copy()
        self.autograde(force=True, commit=False)

        # Each method deals with the new state in a different manner
        if method == 'update':
            if state != self.__dict__:
                if commit:
                    self.save()
                return True
            return False
        elif method in ('best', 'best-feedback'):
            if self.given_grade <= state.get('given_grade', 0):
                new_feedback_data = self.feedback_data
                rollback()
                if new_feedback_data != self.feedback_data:
                    self.feedback_data = new_feedback_data
                    if commit:
                        self.save()
                    return True
                return False
            elif commit:
                self.save()
            return True

        elif method in ('worst', 'worst-feedback'):
            if self.given_grade >= state.get('given_grade', 0):
                new_feedback_data = self.feedback_data
                rollback()
                if new_feedback_data != self.feedback_data:
                    self.feedback_data = new_feedback_data
                    if commit:
                        self.save()
                    return True
                return False
            elif commit:
                self.save()
            return True
        else:
            rollback()
            raise ValueError('invalid method: %s' % method)

    @classmethod
    def get_response_hash(cls, response_data):
        """
        Computes a hash for the response_data attribute.
        """

        if response_data:
            data = json.dumps(response_data, default=json_default)
            return md5hash(data)
        return ''

    # Feedback and visualization
    ok_message = _('*Congratulations!* Your response is correct!')
    ok_with_penalties = _('Your response is correct, but you did not achieve '
                          'the maximum grade.')
    wrong_message = _('I\'m sorry, your response is wrong.')
    partial_message = _('Your answer is partially correct: you achieved only '
                        '%(grade)d%% of the total grade.')

    def html_feedback(self):
        """
        Return a string of HTML source representing the feedback.
        """

        if self.is_done:
            data = {'grade': (self.final_grade or 0)}

            if self.final_grade == 100:
                return markdown(self.ok_message)
            elif self.given_grade == 100:
                return markdown(self.ok_with_penalties)
            elif not self.given_grade:
                return markdown(self.wrong_message)
            else:
                return markdown(self.partial_message % data)
        else:
            return markdown(_('Your response has not been graded yet!'))

    # Permissions
    def can_edit(self, user):
        return False

    def can_view(self, user):
        return user == self.user


class ResponseContext(models.PolymorphicModel):
    """
    Define a different context for a response object.

    The context organizes responses into explicit groups and may also be used
    to define additional constraints on the correct answers.
    """
    class Meta:
        unique_together = [('activity', 'name')]

    # Basic
    activity = models.ParentalKey(
        'wagtailcore.Page',
        related_name='contexts',
    )
    name = models.CharField(_('name'),
                            max_length=140,
                            blank=True,
                            help_text=_('A unique identifier.'))
    description = models.RichTextField(
        _('description'),
        blank=True,
    )

    # Grading and submissions
    grading_method = models.ForeignKey(
        'cs_core.GradingMethod',
        on_delete=models.SET_DEFAULT,
        default=grading_method_best,
        blank=True,
        help_text=_('Choose the strategy for grading this activity.'))
    single_submission = models.BooleanField(
        _('single submission'),
        default=False,
        help_text=_(
            'If set, students will be allowed to send only a single response.'
        ),
    )

    # Feedback
    delayed_feedback = models.BooleanField(
        _('delayed feedback'),
        default=False,
        help_text=_(
            'If set, students will only be able to see the feedback after '
            'the activity deadline expires.'))

    # Deadlines
    deadline = models.DateTimeField(
        _('deadline'),
        blank=True,
        null=True,
    )
    hard_deadline = models.DateTimeField(
        _('hard deadline'),
        blank=True,
        null=True,
        help_text=_(
            'If set, responses submitted after the deadline will be accepted '
            'with a penalty.'))
    delay_penalty = models.DecimalField(
        _('delay penalty'),
        default=25,
        decimal_places=2,
        max_digits=6,
        help_text=_(
            'Sets the percentage of the total grade that will be lost due to '
            'delayed responses.'),
    )

    # Programming languages/formats
    format = models.ForeignKey(
        'cs_core.FileFormat',
        blank=True,
        null=True,
        help_text=_(
            'Defines the required file format or programming language for '
            'student responses, when applicable.'))

    # Extra constraints and resources
    constraints = models.StreamField([], default=[])
    resources = models.StreamField([], default=[])

    def clean(self):
        if not isinstance(self.activity, Activity):
            raise ValidationError({
                'parent': _('Parent is not an Activity subclass'),
            })
        super().clean()
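
A hedged end-to-end sketch of the grading flow shared by Examples #5 and #6
(user and activity are illustrative; autograde_compute() must be implemented
by a concrete ResponseItem subclass):

item = ResponseItem(user=user, activity=activity,
                    response_data={'answer': '42'})
try:
    item.autograde()                 # runs autograde_compute() and saves
except item.InvalidResponseError:
    pass                             # malformed data: status is now 'invalid'
else:
    if item.is_done:
        print(item.html_feedback())  # feedback rendered from markdown
    item.regrade('best')             # keep the better of old/new grades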