class Badge(models.Model): """ Represents an abstract badge. Instances of these class are not associated to specific users. GivenBadge makes the association between badges and users. """ track = models.ForeignKey(BadgeTrack, related_name='badges') name = models.CharField(max_length=200) image = models.ImageField( upload_to='gamification/badges/', blank=True, null=True, ) required_points = models.PositiveIntegerField(default=0) required_score = models.PositiveIntegerField(default=0) required_stars = models.PositiveIntegerField(default=0) description = models.TextField() details = models.RichTextField(blank=True) @property def value(self): """ A sortable element that describes the overall badge difficulty. """ return self.required_stars, self.required_points, self.required_score @classmethod def update_for_user(cls, user, **kwargs): """
class Badge(models.TimeStampedModel, models.PolymorphicModel):
    """
    An abstract badge that marks an accomplishment in a given badge track.

    Instances are templates, not grants: the association between a badge and
    the users that earned it is stored in the GivenBadge through-model.
    """

    # Track this badge belongs to (reverse accessor: track.badges).
    track = models.ForeignKey(BadgeTrack, related_name='badges')
    name = models.CharField(
        _('name'),
        max_length=200,
    )
    # BUGFIX: Django CharFields require an explicit max_length; the original
    # declaration omitted it, which fails Django's system checks (fields.E120).
    # 200 mirrors the sibling `name` field.
    slug = models.CharField(
        max_length=200,
        unique=True,
    )
    description = models.TextField(
        _('description'),
        help_text=_(
            'A detailed description of the accomplishment required to receive '
            'the badge.'),
    )
    message = models.TextField(
        _('message'),
        help_text=_(
            'The message displayed when users receive the given badge'))
    image = models.ImageField(
        upload_to='gamification/badges/',
        blank=True,
        null=True,
    )
    required_achievement = models.PositiveIntegerField(
        default=0,
        help_text=_(
            'Abstract quantity that associated with linear badge tracks.'),
    )
    level = models.PositiveIntegerField(
        _('Badge level'),
        # BUGFIX: added the missing space between the two implicitly
        # concatenated fragments (was rendered as "orderingbetween").
        help_text=_(
            'The badge level: for linear badge tracks, it defines the '
            'ordering between different badges.'),
    )
    # Free-form per-badge metadata.
    extra = models.JSONField(default=dict)
    users = models.ManyToManyField(
        models.User,
        through='GivenBadge',
        related_name='badges',
    )

    def issue_badge(self, user):
        """
        Issue badge for the given user.
        """
        self.users.add(user)
class Sprint(models.Model):
    """
    A single scrum sprint belonging to a :class:`ScrumProject`.
    """

    # Owning project (reverse accessor: project.sprints).
    project = models.ForeignKey(ScrumProject, related_name='sprints')
    description = models.RichTextField(blank=True)
    start_date = models.DateTimeField()
    due_date = models.DateTimeField()
    # Sprint length in whole weeks; the non_null validator rejects zero.
    duration_weeks = models.PositiveIntegerField(default=1,
                                                 validators=[non_null])

    def next_start_date(self, date=None):
        """
        Return the next valid date that the sprint could start after the
        given one. If no argument is given, consider the current time.
        """
        # NOTE(review): currently returns the input date unchanged — looks
        # like a stub; confirm whether any rounding/skipping was intended.
        date = date or now()
        return date

    def attach(self, project, commit=True):
        """
        Associate sprint to project, updating required values.

        The sprint is scheduled to start right after the project's current
        finish date and to end ``duration_weeks`` weeks later.
        """
        date = project.finish_date()
        self.project = project
        self.start_date = self.next_start_date(date)
        self.due_date = self.start_date + one_week * self.duration_weeks
        if commit:
            self.save()
class Score(models.Model):
    """
    A cache to keep track of the total number of points issued to each user.

    This can be recreated at any time from the GivenPoints table.
    """

    # One score row per user; the user id doubles as the primary key.
    user = models.OneToOneField(
        models.User,
        primary_key=True,
        related_name='score',
    )
    points = models.PositiveIntegerField()
    objects = ScoreQuerySet.as_manager()

    class Meta:
        # Highest score first; ties broken by user.
        ordering = '-points', 'user'

    def __int__(self):
        # int(score) yields the raw number of points.
        return self.points
class Activity(models.CopyMixin,
               models.InheritableModel,
               models.DescribableModel,
               models.TimeFramedModel):
    """
    Represents a gradable activity inside a course. Activities may not have an
    explicit grade, but yet may provide points to the students via the
    gamefication features of Codeschool.

    Activities can be scheduled to be done in the class or as a homework
    assignment.

    Each concrete activity is represented by a different subclass.
    """

    class Meta:
        verbose_name = _('activity')
        verbose_name_plural = _('activities')

    STATUS_OPEN = 'open'
    STATUS_CLOSED = 'closed'
    STATUS_VISIBLE = 'visible'
    STATUS_DRAFT = 'draft'
    # BUGFIX: the original read ``STATUS_EXPIRED = 'expired',`` — the trailing
    # comma made the constant a 1-element tuple instead of a string.
    STATUS_EXPIRED = 'expired'
    STATUS = models.Choices(
        (STATUS_DRAFT, _('draft')),
        (STATUS_OPEN, _('open')),
        (STATUS_CLOSED, _('closed')),
        (STATUS_VISIBLE, _('visible')),
        (STATUS_EXPIRED, _('expired')),
    )
    status = models.StatusField(
        _('status'),
        help_text=_(
            'Only open activities will be visible and active to all students.'
        ),
    )
    # Automatically records the moment the status transitions to 'open'.
    published_at = models.MonitorField(
        _('date of publication'),
        monitor='status',
        when=['open'],
    )
    icon_src = models.CharField(
        max_length=50,
        blank=True,
        help_text=_(
            'Optional icon name that can be used to personalize the activity. '
            'Material icons are available by using the "material:" namespace '
            'as in "material:menu".'),
    )
    owner_content_type = models.ForeignKey(
        ContentType,
        verbose_name=_('owner model type'),
        limit_choices_to=ACTIVITY_OWNER_CONTENT_CHOICES,
        related_name='activities_as_owner',
        null=True,
        blank=True,
    )
    owner_id = models.PositiveIntegerField(
        _("owner model's id"),
        null=True,
        blank=True,
    )
    target_content_type = models.ForeignKey(
        ContentType,
        verbose_name=_('target model type'),
        related_name='activities_as_target',
        null=True,
        blank=True,
    )
    target_id = models.PositiveIntegerField(
        _("target model's id"),
        null=True,
        blank=True,
    )
    course = models.ForeignKey(
        'cs_courses.Course',
        related_name='activities',
        blank=True,
        null=True,
    )
    parent = models.ForeignKey(
        'self',
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name='children',
    )
    grading_method = models.ForeignKey(
        GradingMethod,
        default=grading_method_best,
        blank=True,
    )

    #: The owner object is either a course object or an user object. This
    #: object has control to the given activity and define which users have
    #: permissions to access and edit it.
    owner_object = GenericForeignKey('owner_content_type', 'owner_id')

    #: The target object the activity acts upon (generic relation).
    target_object = GenericForeignKey('target_content_type', 'target_id')

    objects = ActivityQueryset.as_manager()

    @property
    def course_(self):
        """Points to the course object or None if owner is not a course."""
        obj = self.owner_object
        return obj if isinstance(obj, Course) else None

    @property
    def owner(self):
        """Points to the user that owns the activity."""
        obj = self.owner_object
        if isinstance(obj, models.User):
            return obj
        else:
            return self.course.owner

    #: Define the default material icon used in conjunction with instances of
    #: the activity class.
    default_material_icon = 'help'

    #: The response class associated with the given activity.
    response_class = None

    @property
    def material_icon(self):
        """The material icon used in conjunction with the activity."""
        if self.icon_src.startswith('material:'):
            return self.icon_src[9:]
        return self.default_material_icon

    @property
    def icon_html(self):
        """A string of HTML source that points to the icon element fo the
        activity."""
        return '<i class="material-icon">%s</i>' % self.material_icon

    # Permission control
    def can_edit(self, user):
        """
        Return True if user has permissions to edit activity.
        """
        return user == self.owner or self.course.can_edit(user)

    def can_view(self, user):
        """
        Return True if user has permission to view activity.
        """
        course = self.course
        # BUGFIX: the original tested ``self.staff`` but Activity declares no
        # staff relation — the course does (assumes Course.staff m2m; confirm).
        return (self.can_edit(user) or
                user in course.students.all() or
                user in course.staff.all())

    # Other functions
    def get_absolute_url(self):
        return reverse('activity:detail', kwargs={'pk': self.pk})

    # Response and grading control
    def has_user_response(self, user):
        """
        Return True if the user has responsed to the question.

        Use either :func:`Activity.get_user_response` or
        :func:`Activity.get_user_responses` methods to fetch the user
        responses.
        """
        return bool(self.responses.filter(user=user))

    def get_user_response(self, user, method='first'):
        """
        Return some response given by the user or None if the user has not
        responded.

        Allowed methods:
            unique:
                Expects that response is unique and return it (or None).
            any:
                Return a random user response.
            first:
                Return the first response given by the user.
            last:
                Return the last response given by the user.
            best:
                Return the response with the best final grade.
            worst:
                Return the response with the worst final grade.
            best-given:
                Return the response with the best given grade.
            worst-given:
                Return the response with the worst given grade.
        """
        responses = self.responses.filter(user=user)
        first = lambda x: x.select_subclasses().first()

        if method == 'unique':
            # BUGFIX: the original counted ALL responses (self.responses) and
            # then read an undefined name ``response``; count and return the
            # given user's responses instead.
            N = responses.count()
            if N == 0:
                return None
            elif N == 1:
                return first(responses)
            else:
                raise ValueError('more than one response found for user %r' %
                                 user.username)
        elif method == 'any':
            return first(responses)
        elif method == 'first':
            return first(responses.order_by('created'))
        elif method == 'last':
            return first(responses.order_by('-created'))
        elif method in ['best', 'worst', 'best-given', 'worst-given']:
            raise NotImplementedError(
                'method = %r is not implemented yet' % method)
        else:
            raise ValueError('invalid method: %r' % method)

    def get_user_responses(self, user):
        """
        Return all responses by the given user.
        """
        return self.responses.filter(user=user).select_subclasses()

    def get_user_final_response(self, user):
        """Return the FinalResponse object associated with the given user."""
        try:
            return self.final_responses.get(user=user)
        except ObjectDoesNotExist:
            return self.final_responses.create(user=user)

    def get_user_grade(self, user):
        """
        Return the numeric grade associated with the user.
        """
        final_response = self.get_user_final_response(user)
        return final_response.grade()

    def select_responses(self, force=False):
        """
        Return a queryset with all responses related to the given question.

        Args:
            force (boolean):
                If False (default), only responses pending grading are
                selected; otherwise all responses are returned.
        """
        from cs_activities.models import Response

        # BUGFIX: ``force`` was referenced here but was never declared as a
        # parameter in the original signature.
        if not force:
            responses = self.responses.filter(status=Response.STATUS_PENDING)
        else:
            responses = self.responses.all()
        return responses.select_subclasses()

    def grade_responses(self, force=False):
        """
        Grade all responses that had not been graded yet. This function may
        take a while to run, locking the server. Maybe it is a good idea to
        run it as a task or in a separate thread.

        Args:
            force (boolean):
                If True, forces the response to be re-graded.
        """
        # BUGFIX: ``responses`` was never defined in the original body; fetch
        # the pending (or all, when forced) responses before grading.
        responses = self.select_responses(force=force)

        # Run autograde on each response
        for response in responses:
            response.autograde(force=force)

    def select_users(self):
        """
        Return a queryset with all users that responded to the activity.
        """
        user_ids = self.responses.values_list('user', flat=True).distinct()
        users = models.User.objects.filter(id__in=user_ids)
        return users

    def get_grades(self, users=None):
        """
        Return a dictionary mapping each user to their respective grade in the
        activity.

        If a list of users is given, include only the users in this list.
        """
        if users is None:
            users = self.select_users()

        return {user: self.get_user_grade(user) for user in users}
class CodingIoQuestion(Question):
    """
    CodeIo questions evaluate source code and judge them by checking if the
    inputs and corresponding outputs match an expected pattern.
    """

    class Meta:
        verbose_name = _('Programming question (IO-based)')
        verbose_name_plural = _('Programming questions (IO-based)')

    # Markio (.md) files get their own loader in addition to the base ones.
    EXT_TO_METHOD_CONVERSIONS = dict(
        Question.EXT_TO_METHOD_CONVERSIONS,
        md='markio',
    )

    iospec_size = models.PositiveIntegerField(
        _('number of iospec template expansions'),
        default=10,
        help_text=_(
            'The desired number of test cases that will be computed after '
            'comparing the iospec template with the answer key. This is only a '
            'suggested value and will only be applied if the response template '
            'uses input commands to generate random input.'),
    )
    iospec_source = models.TextField(
        _('response template'),
        help_text=_(
            'Template used to grade I/O responses. See '
            'http://pythonhosted.org/iospec for a complete reference on the '
            'template format.'),
    )
    iospec_hash = models.CharField(
        max_length=32,
        blank=True,
        help_text=_('A hash to keep track of iospec updates.'),
    )
    timeout = models.FloatField(
        _('timeout in seconds'),
        blank=True,
        default=1.0,
        help_text=_(
            'Defines the maximum runtime the grader will spend evaluating '
            'each test case.'),
    )
    language = models.ForeignKey(
        ProgrammingLanguage,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
        help_text=_(
            'Programming language associated with question. Leave it blank in '
            'order to accept submissions in any programming language. This '
            'option should be set only for questions that tests specific '
            'programming languages constructs or require techniques that only '
            'make sense in specific programming languages.'),
    )

    # Set by clean() when the iospec source changed since the last save.
    __iospec_updated = False
    # Answer-key items created by load_from_markio(), attached in full_clean().
    __answers = ()

    @lazy
    def iospec(self):
        """
        The IoSpec structure corresponding to the iospec_source.
        """
        return parse_iospec(self.iospec_source)

    def __init__(self, *args, **kwargs):
        # Supports automatic conversion between iospec data and iospec_source
        iospec = kwargs.pop('iospec', None)
        if iospec:
            kwargs['iospec_source'] = iospec.source()
            self.iospec = iospec
        super().__init__(*args, **kwargs)

    def load_from_file_data(self, file_data):
        """Extend the base loader with the parsed iospec source."""
        fake_post = super().load_from_file_data(file_data)
        fake_post['iospec_source'] = self.iospec_source
        return fake_post

    def clean(self):
        """
        Validate the iospec_source field.
        """
        super().clean()

        # We first should check if the iospec_source has been changed and
        # would require a possibly expensive validation.
        source = self.iospec_source
        iospec_hash = md5hash(source)

        if self.iospec_hash != iospec_hash:
            try:
                self.iospec = iospec = parse_iospec(self.iospec_source)
            except Exception as ex:
                raise ValidationError(
                    {'iospec_source': _('invalid iospec syntax: %s' % ex)})
            self.__iospec_updated = True

            # BUGFIX: this consistency check was dead code in the original —
            # a bare ``return`` preceded it and, on the unchanged-hash path,
            # ``iospec`` was not even defined (NameError). The check belongs
            # inside the changed-hash branch, after a successful parse.
            if (not iospec.is_expanded) and not self.answers.has_program():
                raise ValidationError({
                    'iospec_source':
                        _('You iospec definition uses a command or an @input '
                          'block and thus requires an example grading code. '
                          'Please define an "Answer Key" item with source '
                          'code for at least one programming language.')
                })

    def load_from_markio(self, file_data):
        """
        Load question parameters from Markio file.
        """
        data = markio.parse(file_data)

        # Load simple data from markio
        self.title = data.title or self.title
        self.short_description = (data.short_description or
                                  self.short_description)
        self.timeout = data.timeout or self.timeout
        self.author_name = data.author or self.author_name
        self.iospec_source = data.tests or self.iospec_source

        # Load main description
        # noinspection PyUnresolvedReferences
        self.body = markdown_to_blocks(data.description)

        # Add answer keys
        answer_keys = OrderedDict()
        for (lang, answer_key) in data.answer_key.items():
            language = programming_language(lang)
            key = self.answers.create(question=self,
                                      language=language,
                                      source=answer_key)
            answer_keys[lang] = key
        for (lang, placeholder) in data.placeholder.items():
            if placeholder is None:
                continue
            try:
                answer_keys[lang].placeholder = placeholder
            except KeyError:
                # BUGFIX: ``.get(lang)`` passed the ref positionally (invalid
                # for QuerySet.get) and the relation was misspelled as
                # ``self.answer_keys`` — the rest of the class uses ``answers``
                # (see InlinePanel('answers') below).
                language = ProgrammingLanguage.objects.get(ref=lang)
                self.answers.create(question=self,
                                    language=language,
                                    placeholder=placeholder)
        self.__answers = list(answer_keys.values())

    # Serialization methods: support markio and sets it as the default
    # serialization method for CodingIoQuestion's
    @classmethod
    def load_markio(cls, source):
        """
        Creates a CodingIoQuestion object from a Markio object or source
        string and saves the resulting question in the database.

        This function can run without touching the database if the markio
        file does not define any information that should be saved in an
        answer key.

        Args:
            source:
                A string with the Markio source code.

        Returns:
            question:
                A question object.
        """
        raise NotImplementedError

    def dump_markio(self):
        """
        Serializes question into a string of Markio source.
        """
        tree = markio.Markio(
            title=self.name,
            author=self.author_name,
            timeout=self.timeout,
            short_description=self.short_description,
            description=self.long_description,
            tests=self.iospec_source,
        )

        # BUGFIX: the relation is ``answers`` in this class (clean(),
        # load_from_markio() and the admin panels all use it).
        for key in self.answers.all():
            tree.add_answer_key(key.source, key.language.ref)
            tree.add_placeholder(key.placeholder, key.language.ref)

        return tree.source()

    def full_clean(self, *args, **kwargs):
        # Attach answer keys accumulated by load_from_markio() before
        # validation runs.
        if self.__answers:
            self.answers = self.__answers
        super().full_clean(*args, **kwargs)

    def placeholder(self, language=None):
        """
        Return the placeholder text for the given language.
        """
        key = self.answers[language or self.language]
        if key is None:
            return ''
        return key.placeholder

    def reference_source(self, language=None):
        """
        Return the reference source code for the given language or None, if no
        reference is found.
        """
        key = self.answers[language or self.language]
        if key is None:
            return ''
        return key.source

    def run_code(self, source, language=None, iospec=None):
        """
        Run the given source code string of the given programming language
        using the default or the given IoSpec.

        If no code string is given, runs the reference source code, if it
        exists.
        """
        key = self.answers[language or self.language]
        return key.run(source, iospec)

    def update_iospec_source(self):
        """
        Updates the iospec_source attribute with the current iospec object.

        Any modifications made to `self.iospec` must be saved explicitly to
        persist in the database.
        """
        if 'iospec' in self.__dict__:
            self.iospec_source = self.iospec.source()

    def submit(self, user, source=None, language=None, **kwargs):
        """
        Build a normalized response_data payload and delegate to the base
        Question.submit().
        """
        # BUGFIX: the original used kwargs.get('response_data'), leaving the
        # key inside **kwargs and causing a duplicate-keyword TypeError in the
        # super() call below; pop it instead.
        response_data = kwargs.pop('response_data', {})
        if source is None and 'source' in response_data:
            source = response_data.pop('source')
        if language is None and 'language' in response_data:
            language = response_data.pop('language')

        # Assure language is valid
        language = language or self.language
        if not language:
            raise ValueError(
                'could not determine the programming language for '
                'the submission')

        # Assure response data is empty
        if response_data:
            key = next(iter(response_data))
            raise TypeError('invalid or duplicate parameter passed to '
                            'response_data: %r' % key)

        # Construct response data and pass it to super
        response_data = {
            'language': language.ref,
            'source': source,
        }
        return super().submit(user, response_data=response_data, **kwargs)

    # Serving pages and routing
    template = 'questions/coding_io/detail.jinja2'
    template_submissions = 'questions/coding_io/submissions.jinja2'

    def get_context(self, request, *args, **kwargs):
        """Template context with ace-editor mode and language selection."""
        context = dict(super().get_context(request, *args, **kwargs),
                       form=True)

        # Select default mode for the ace editor
        if self.language:
            context['default_mode'] = self.language.ace_mode()
        else:
            context['default_mode'] = get_config(
                'CODESCHOOL_DEFAULT_ACE_MODE', 'python')

        # Enable language selection
        if self.language is None:
            context['select_language'] = True
            context['languages'] = ProgrammingLanguage.supported.all()
        else:
            context['select_language'] = False
        return context

    @srvice.route(r'^submit-response/$')
    def route_submit(self, client, source=None, language=None, **kwargs):
        """
        Handles student responses via AJAX and a srvice program.
        """
        # User must choose language
        if not language:
            if self.language is None:
                client.dialog('<p class="dialog-text">%s</p>' %
                              _('Please select the correct language'))
                return
            language = self.language
        else:
            language = programming_language(language)

        # Bug with <ace-editor>?
        if not source or source == '\x01\x01':
            client.dialog('<p class="dialog-text">%s</p>' %
                          _('Internal error: please send it again!'))
            return

        super().route_submit(
            client=client,
            language=language,
            source=source,
        )

    @srvice.route(r'^placeholder/$')
    def route_placeholder(self, request, language):
        """
        Return the placeholder code for some language.
        """
        # BUGFIX: called nonexistent self.get_placehoder(); the placeholder()
        # method above provides this behavior.
        return self.placeholder(language)

    # Wagtail admin
    content_panels = Question.content_panels[:]
    content_panels.insert(
        -1,
        panels.MultiFieldPanel([
            panels.FieldPanel('iospec_size'),
            panels.FieldPanel('iospec_source'),
        ], heading=_('IoSpec definitions')))
    content_panels.insert(
        -1,
        panels.InlinePanel('answers', label=_('Answer keys')))
    settings_panels = Question.settings_panels + [
        panels.MultiFieldPanel([
            panels.FieldPanel('language'),
            panels.FieldPanel('timeout'),
        ], heading=_('Options'))
    ]
class CodingIoQuestion(Question, models.StatusModel):
    """
    CodeIo questions evaluate source code and judge them by checking if the
    inputs and corresponding outputs match an expected pattern.
    """

    # Question lifecycle states (StatusModel drives the `status` field).
    STATUS_INVALID = 'invalid'
    STATUS_UGLY = 'ugly'
    STATUS_DIRTY = 'dirty'
    STATUS_VALID = 'valid'
    STATUS_INCOMPLETE = 'incomplete'
    STATUS = models.Choices(
        (STATUS_INCOMPLETE, _('is not yet fully initialized')),
        (STATUS_INVALID, _('no valid answers')),
        (STATUS_UGLY, _('inconsistent answers')),
        (STATUS_DIRTY, _('some valid answers')),
        (STATUS_VALID, _('valid')),
    )

    # NOTE(review): the concatenated help_text fragments below are missing
    # separating spaces ("computedafter", "key.This", "ifthe", "generaterandom")
    # — runtime strings, left untouched here; worth fixing upstream.
    iospec_size = models.PositiveIntegerField(
        _('number of iospec template expansions'),
        default=0,
        blank=True,
        help_text=_('The desired number of test cases that will be computed'
                    'after comparing the iospec template with the answer key.'
                    'This is only a suggested value and will only be applied if'
                    'the response template uses input commands to generate'
                    'random input.'),
    )
    iospec_source = models.TextField(
        _('response template'),
        blank=True,
        help_text=_('Template used to grade I/O responses. See '
                    'http://pythonhosted.org/iospec for a complete reference '
                    'on the template format.'),
    )
    timeout = models.FloatField(
        _('timeout in seconds'),
        blank=True,
        default=5.0,
        help_text=_('Defines the maximum runtime the grader will spend '
                    'evaluating each test case.'),
    )
    # Tracks field changes between loads/saves (django-model-utils).
    tracker = FieldTracker()

    @property
    def iospec(self):
        """The IoSpec structure corresponding to the iospec_source.

        Re-parsed on every access (no caching).
        """
        return parse_iospec(self.iospec_source)

    @property
    def hash(self):
        """The hash for the iospec_source string.

        This hash is compared to a hash registered to each answer key to
        check if it has the most current iospec data.
        """
        return md5hash(self.iospec_source + str(self.iospec_size))

    @property
    def is_answer_key_complete(self):
        """Return True if an answer key exists for all programming
        languages."""
        # NOTE(review): `self.is_answer_keys` looks like a typo for
        # `self.answer_keys`, and `.values(..., flatten=True)` is not Django
        # API — values_list(..., flat=True) is presumably intended. Both
        # lines would raise at runtime as written; confirm and fix upstream.
        refs = self.is_answer_keys.values('language__ref', flatten=True)
        all_refs = ProgrammingLanguage.objects.values('ref', flatten=True)
        return set(all_refs) == set(refs)

    class Meta:
        app_label = 'cs_questions'
        verbose_name = _('input/output question')
        verbose_name_plural = _('input/output questions')

    # Importing and exporting
    @classmethod
    def from_markio(cls, source, commit=None, return_keys=False):
        """Creates a CodingIoQuestion object from a Markio object or source
        string and saves the resulting question in the database.

        This function can run without touching the database if the markio
        file does not define any information that should be saved in an
        answer key.

        Args:
            source:
                A string with the Markio source code.
            commit (bool):
                If True (default), saves resulting question in the database.
            return_keys (bool):
                If True, also return a dictionary mapping language references
                to answer keys.

        Returns:
            question:
                A question object.
            [answer_keys]:
                A map from language references to :class:`AnswerKeyItem`
                objects.
        """
        import markio

        if isinstance(source, markio.Markio):
            data = source
        else:
            data = markio.parse_string(source)

        # Create question object from parsed markio data
        question = CodingIoQuestion(
            title=data.title,
            author_name=data.author,
            timeout=data.timeout,
            short_description=data.short_description,
            long_description=data.description,
            iospec_source=data.tests,
        )
        saving(question, commit)

        # Add answer keys
        answer_keys = {}
        for (lang, answer_key) in data.answer_key.items():
            language = programming_language(lang)
            key = saving(CodingIoAnswerKey(question=question,
                                           language=language,
                                           source=answer_key), commit)
            answer_keys[lang] = key
        for (lang, placeholder) in data.placeholder.items():
            if placeholder is None:
                continue
            try:
                answer_keys[lang].placeholder = placeholder
                saving(answer_keys[lang], commit,
                       update_fields=['placeholder'])
            except KeyError:
                # NOTE(review): `.get(lang)` passes the ref positionally
                # (QuerySet.get expects keyword lookups), and the new key is
                # never added to `answer_keys` — confirm intent upstream.
                language = ProgrammingLanguage.objects.get(lang)
                key = CodingIoAnswerKey(question=question,
                                        language=language,
                                        placeholder=placeholder)
                saving(key, commit)

        # Question is done!
        if return_keys:
            answer_keys = {key.language.ref: key
                           for key in answer_keys.values()}
            return question, answer_keys
        return question

    @classmethod
    def from_data(cls, source):
        """Return a new CodingIoQuestion instance from a string of Markio
        data.

        This API is used by the HasUploadMixin in the create view.
        """
        return cls.from_markio(source.decode('utf8'))

    def to_markio(self):
        """Serializes question into a string of Markio source."""
        import markio

        tree = markio.Markio(
            title=self.name,
            author=self.author_name,
            timeout=self.timeout,
            short_description=self.short_description,
            description=self.long_description,
            tests=self.iospec_source,
        )

        for key in self.answer_keys.all():
            tree.add_answer_key(key.source, key.language.ref)
            tree.add_placeholder(key.placeholder, key.language.ref)

        return tree.source()

    def to_data(self, type=None):
        """Render question as a Markio source.

        This API is used by the DetailView in the CRUD interface.
        """
        if type in (None, 'markio'):
            return self.to_markio()
        else:
            return NotImplemented

    # Validation
    def update(self, save=True, validate=True):
        """Update and validate all answer keys.

        Sets `self.status` to one of invalid/ugly/dirty/valid according to
        the answer keys' expansion results.
        """
        exception = None
        expanded_sources = {}
        invalid_languages = set()
        valid_languages = set()

        def validate_answer_keys():
            nonlocal exception

            for key in self.answer_keys.all():
                try:
                    # NOTE(review): `key.is_update` may be a typo for an
                    # `is_updated` flag — confirm against the AnswerKey model.
                    if not key.is_update:
                        key.question = self
                        key.update(save, validate)
                    if not key.is_valid:
                        invalid_languages.add(key.language.ref)
                    elif key.source:
                        valid_languages.add(key.language.ref)
                except key.ValidationError as ex:
                    exception = ex
                    # NOTE(review): self-assignment — a no-op; possibly meant
                    # to capture/augment the traceback.
                    exception.__traceback__ = exception.__traceback__
                if key.iospec_source:
                    expanded_sources[key.language.ref] = key.iospec_source

            # Derive the aggregate question status from the per-key results.
            if len(expanded_sources) == 0:
                self.status = 'invalid'
            elif len(set(expanded_sources.values())) != 1:
                self.status = 'ugly'
            elif invalid_languages:
                if valid_languages:
                    self.status = 'dirty'
                else:
                    self.status = 'invalid'
            else:
                self.status = 'valid'

        # Save fields if rollback is necessary
        iospec_source = self.iospec_source
        iospec_size = self.iospec_size
        has_changed = (self.tracker.has_changed('iospec_source') or
                       self.tracker.has_changed('iospec_size'))

        # If fields had changed, update and restore original values
        if has_changed:
            self.save(update_fields=['iospec_source', 'iospec_size'])
            try:
                validate_answer_keys()
            finally:
                if not save:
                    # Roll the persisted fields back to their pre-call values.
                    self.iospec_size = iospec_size
                    self.iospec_source = iospec_source
                    self.save(update_fields=['iospec_source', 'iospec_size'])
        else:
            validate_answer_keys()

        # Force save if necessary
        if save:
            self.save()

    def update_keys(self):
        """Update all keys that were not updated."""
        # Keys whose recorded hash differs from the current question hash are
        # stale and get refreshed (without re-validation).
        for key in self.answer_keys.exclude(iospec_hash=self.hash):
            key.update(validate=False)

    def get_validation_errors(self, lang=None, test_iospec=True):
        """Raise ValueError if some answer key is invalid or produce invalid
        iospec expansions.

        Return a valid iospec tree expansion or None if no expansion was
        possible (e.g., by the lack of source code in the answer key).
        """
        # It cannot be valid if the iospec source does not not parse
        if test_iospec:
            # NOTE(review): self.iospec is already a parsed object; this
            # double-parse probably meant self.iospec_source. `tree` is also
            # never used afterwards. Confirm upstream.
            try:
                tree = parse_iospec(self.iospec)
            except SyntaxError as ex:
                raise ValueError('invalid iospec syntax: %s' % ex)

        # Expand to all langs if lang is not given
        if lang is None:
            keys = self.answer_keys.exclude(source='')
            langs = keys.values_list('language', flat=True)
            # NOTE(review): `self.is_valid` does not exist on this class —
            # likely a recursive call to this method was intended.
            expansions = [self.is_valid(lang, test_iospec=False)
                          for lang in langs]
            if not expansions:
                return None
            # NOTE(review): `iospec` here must be a module-level helper; it is
            # not defined in this class. Verify import.
            if iospec.ioequal(expansions):
                return expansions[0]

        # Test an specific language
        if isinstance(lang, str):
            # NOTE(review): missing `.objects` manager — as written this
            # raises AttributeError.
            lang = ProgrammingLanguage.get(ref=lang)
        try:
            key = self.answer_keys.get(language=lang)
        except self.DoesNotExist:
            return None

        if key.source:
            result = run_code(key.source, key, lang=lang.ref)
            if result.has_errors():
                raise result.get_exception()
            return result
        else:
            return None

    # Other API
    def get_placeholder(self, lang):
        """Return the placeholder text for the given language.

        Returns '' when the language or its answer key does not exist.
        """
        if isinstance(lang, str):
            try:
                lang = ProgrammingLanguage.objects.get(ref=lang)
            except ProgrammingLanguage.DoesNotExist:
                return ''
        try:
            key = self.answer_keys.get(language=lang)
            return key.placeholder
        except CodingIoAnswerKey.DoesNotExist:
            return ''

    def grade(self, response, error=None):
        """Grade the given response object and return the corresponding
        feedback object.

        Falls back to expanding the question's own iospec when no answer key
        exists for the response's language.
        """
        try:
            key = self.answer_keys.get(language=response.language)
            key.assure_is_valid(error)
            iospec_data = key.iospec
        except CodingIoAnswerKey.DoesNotExist:
            # Refresh stale keys, then derive iospec data from whatever valid
            # keys exist.
            self.update_keys()

            # Get all sources
            iospec_sources = self.answer_keys.filter(is_valid=True)\
                .values_list('iospec_source', flat=True)
            iospec_sources = set(iospec_sources)

            # Check if there is only a single distinct source
            if not iospec_sources:
                iospec_data = self.iospec.copy()
                iospec_data.expand_inputs()
                if not all(isinstance(x, SimpleTestCase) for x in iospec_data):
                    raise (
                        error or
                        CodingIoAnswerKey.ValidationError(
                            iospec_data.pformat())
                    )
            elif len(iospec_sources) == 1:
                iospec_data = parse_iospec(next(iter(iospec_sources)))
            else:
                raise error or CodingIoAnswerKey.ValidationError(
                    iospec_sources)

        # Construct ejudge feedback object
        lang = response.language.ref
        source = response.source
        return grade_code(source, iospec_data, lang=lang)
class Course(models.DateFramedModel, models.TimeStampedModel):
    """
    One specific occurrence of a course for a given teacher in a given
    period.
    """

    # Fields
    discipline = models.ForeignKey(
        Discipline,
        related_name='courses'
    )
    teacher = models.ForeignKey(
        models.User,
        related_name='owned_courses'
    )
    students = models.ManyToManyField(
        models.User,
        related_name='enrolled_courses',
        blank=True,
    )
    staff = models.ManyToManyField(
        models.User,
        related_name='courses_as_staff',
        blank=True,
    )
    current_lesson_index = models.PositiveIntegerField(default=0, blank=True)
    current_lesson_start = models.DateField(blank=True, null=True)
    is_active = models.BooleanField(_('is active'), default=False)

    # Managers
    @property
    def past_activities(self):
        """Activities that are concluded or whose timeframe already ended."""
        return (
            self.activities.filter(status=Activity.STATUS.concluded) |
            self.activities.filter(end__lt=timezone.now())
        ).select_subclasses()

    @property
    def open_activities(self):
        """Activities that are open and currently inside their timeframe."""
        return (self.activities.timeframed.all() &
                self.activities.filter(
                    status=Activity.STATUS.open)).select_subclasses()

    @property
    def pending_activities(self):
        """Draft activities plus open ones whose timeframe has expired."""
        return (self.activities.filter(status=Activity.STATUS.draft) |
                (self.activities.filter(status=Activity.STATUS.open) &
                 self.activities.filter(
                     end__lt=timezone.now()))).select_subclasses()

    # Descriptive data is delegated to the discipline.
    name = property(lambda s: s.discipline.name)
    short_description = property(lambda s: s.discipline.short_description)
    long_description = property(lambda s: s.discipline.long_description)

    def __str__(self):
        return '%s (%s)' % (self.discipline.name, self.teacher.first_name)

    def get_absolute_url(self):
        return url_reverse('course-detail', args=(self.pk,))

    def user_activities(self, user):
        """Return a list of all activities that are valid for the given
        user."""
        # NOTE(review): `user` is currently unused — confirm whether
        # per-user filtering was intended here.
        return self.activities.select_subclasses()

    def activity_duration(self):
        """Return the default duration (in minutes) for an activity starting
        from now."""
        return 120

    def next_time_slot(self):
        """Return the start and end times for the next class in the course.

        If a time slot is currently open, return it.
        """
        now = timezone.now()
        # BUGFIX: timedelta's first positional argument is *days*; the value
        # returned by activity_duration() is minutes (a 2-hour class), so the
        # original produced a 120-day slot.
        duration = timezone.timedelta(minutes=self.activity_duration())
        return now, now + duration

    def next_date(self, date=None):
        """Return the date of the next available time slot."""
        # TODO: implementation missing in the original source; currently
        # returns None.
class CodingIoQuestion(Question):
    """
    CodeIo questions evaluate source code and judge them by checking if the
    inputs and corresponding outputs match an expected pattern.
    """

    class Meta:
        verbose_name = _('input/output question')
        verbose_name_plural = _('input/output questions')

    iospec_size = models.PositiveIntegerField(
        _('number of iospec template expansions'),
        default=10,
        help_text=_(
            'The desired number of test cases that will be computed after '
            'comparing the iospec template with the answer key. This is only a '
            'suggested value and will only be applied if the response template '
            'uses input commands to generate random input.'),
    )
    iospec_source = models.TextField(
        _('response template'),
        help_text=_(
            'Template used to grade I/O responses. See '
            'http://pythonhosted.org/iospec for a complete reference on the '
            'template format.'),
    )
    iospec_hash = models.CharField(
        max_length=32,
        blank=True,
        help_text=_('A hash to keep track of iospec updates.'),
    )
    timeout = models.FloatField(
        _('timeout in seconds'),
        blank=True,
        default=1.0,
        help_text=_(
            'Defines the maximum runtime the grader will spend evaluating '
            'each test case.'),
    )
    is_usable = models.BooleanField(
        _('is usable'),
        help_text=_(
            'Tells if the question has at least one usable iospec entry. A '
            'complete iospec may be given from a single iospec source or by a '
            'combination of a valid source and a reference computer program.'))
    is_consistent = models.BooleanField(
        _('is consistent'),
        help_text=_(
            'Checks if all given answer keys are consistent with each other. '
            'The question might become inconsistent by the addition of an '
            'reference program that yields different results from the '
            'equivalent program in a different language.'))

    @lazy
    def iospec(self):
        """
        The IoSpec structure corresponding to the iospec_source.
        """
        return parse_iospec(self.iospec_source)

    @property
    def is_answer_key_complete(self):
        """
        Return True if an answer key item exists for all programming
        languages.
        """
        # BUGFIX: the related manager is ``answer_key_items`` (see
        # .default_language and .answer_key_item()); ``is_answer_keys`` is not
        # defined anywhere in this class.
        refs = self.answer_key_items.values('language__ref', flatten=True)
        all_refs = ProgrammingLanguage.objects.values('ref', flatten=True)
        return set(all_refs) == set(refs)

    @bound_property
    def language(self):
        """
        Instances can be bound to a programming language.
        """
        return getattr(self, '_language_bind', None)

    @language.setter
    def language(self, value):
        self._language_bind = programming_language(value, raises=False)

    @property
    def is_language_bound(self):
        return self.language is not None

    @property
    def default_language(self):
        """
        The main language associated with this question if a single answer
        key is defined.
        """
        return self.answer_key_items.get().language

    def _language(self, language=None, raises=True):
        # Shortcut used internally to normalize the given language
        if language is None:
            return self.language or self.default_language
        return programming_language(language, raises)

    def __init__(self, *args, **kwargs):
        # Supports automatic conversion between iospec data and iospec_source
        iospec = kwargs.pop('iospec', None)
        if iospec:
            kwargs['iospec_source'] = iospec.source()
            self.iospec = iospec
        super().__init__(*args, **kwargs)

    def clean(self):
        """
        Validate the iospec_source field.
        """
        super().clean()

        # We first should check if the iospec_source has been changed and thus
        # requires a possibly expensive validation.
        source = self.iospec_source
        iospec_hash = md5hash(source)
        if self.iospec_hash != iospec_hash:
            try:
                self.iospec = iospec.parse_string(self.iospec_source)
            except Exception:
                raise ValidationError(
                    {'iospec_source': _('invalid iospec syntax')})
            else:
                self.iospec_hash = iospec_hash
            if self.pk is None:
                self.is_usable = self.iospec.is_simple
                self.is_consistent = True
            else:
                self.is_usable = self._is_usable(self.iospec)
                self.is_consistent = self._is_consistent(self.iospec)

    def _is_usable(self, iospec):
        """
        This function is triggered during the clean() validation when a new
        iospec data is inserted into the database.
        """
        # Simple iospecs are always valid since they can be compared with
        # arbitrary programs.
        if iospec.is_simple_io:
            return True

        # For now we reject all complex iospec structures
        return False

    def _is_consistent(self, iospec):
        """
        This function is triggered during the clean() validation when a new
        iospec data is inserted into the database.
        """
        # Simple iospecs always produce consistent answer keys since we
        # prevent invalid reference programs of being inserted into the
        # database during AnswerKeyItem validation.
        if iospec.is_simple_io:
            return True

        # For now we reject all complex iospec structures
        return False

    # Serialization methods: support markio and sets it as the default
    # serialization method for CodingIoQuestion's
    @classmethod
    def load_markio(cls, source):
        """
        Creates a CodingIoQuestion object from a Markio object or source
        string and saves the resulting question in the database.

        This function can run without touching the database if the markio
        file does not define any information that should be saved in an
        answer key.

        Args:
            source: A string with the Markio source code.

        Returns:
            question: A question object.
        """
        import markio

        if isinstance(source, markio.Markio):
            data = source
        else:
            data = markio.parse_string(source)

        # Create question object from parsed markio data
        question = CodingIoQuestion.objects.create(
            title=data.title,
            author_name=data.author,
            timeout=data.timeout,
            short_description=data.short_description,
            long_description=data.description,
            iospec_source=data.tests,
        )

        # Add answer keys
        answer_keys = {}
        for (lang, answer_key) in data.answer_key.items():
            language = programming_language(lang)
            key = question.answer_keys.create(language=language,
                                              source=answer_key)
            answer_keys[lang] = key
        for (lang, placeholder) in data.placeholder.items():
            if placeholder is None:
                continue
            try:
                answer_keys[lang].placeholder = placeholder
                answer_keys[lang].save(update_fields=['placeholder'])
            except KeyError:
                # NOTE(review): positional .get(lang) is suspicious — Django
                # managers expect keyword lookups (e.g. ref=lang); confirm.
                language = ProgrammingLanguage.objects.get(lang)
                question.answer_keys.create(language=language,
                                            placeholder=placeholder)
        return question

    @classmethod
    def load(cls, format='markio', **kwargs):
        return super().load(format=format, **kwargs)

    def dump_markio(self):
        """
        Serializes question into a string of Markio source.
        """
        import markio

        tree = markio.Markio(
            title=self.name,
            author=self.author_name,
            timeout=self.timeout,
            short_description=self.short_description,
            description=self.long_description,
            tests=self.iospec_source,
        )

        for key in self.answer_keys.all():
            tree.add_answer_key(key.source, key.language.ref)
            tree.add_placeholder(key.placeholder, key.language.ref)

        return tree.source()

    def answer_key_item(self, language=None):
        """
        Return the AnswerKeyItem instance for the requested language or None
        if no object is found.
        """
        language = self._language(language)
        try:
            return self.answer_key_items.get(language=language)
        except AnswerKeyItem.DoesNotExist:
            return None

    def answer_key(self, language=None):
        """
        Return the answer key IoSpec object associated with the given
        language.
        """
        key = self.answer_key_item(language)
        if key is None or key.iospec_source is None:
            new_key = self.answer_key_item()
            if key == new_key:
                if self.iospec.is_simple:
                    raise ValueError('no valid iospec is defined for the '
                                     'question')
                # BUGFIX: the bare module name ``iospec`` was referenced here;
                # expand the question's own iospec instead. Expansion mutates
                # in place, so work on a copy.
                expansion = self.iospec.copy()
                expansion.expand_inputs(self.iospec_size)
                return expansion
            key = new_key

        # We check if the answer key item is synchronized with the parent hash
        if key.iospec_hash != key.parent_hash():
            try:
                key.update(self.iospec)
            except ValidationError:
                return self.iospec
        return key.iospec

    def placeholder(self, language=None):
        """
        Return the placeholder text for the given language.
        """
        # BUGFIX: ``key`` was referenced without ever being assigned
        # (guaranteed NameError); fetch the answer key item first, mirroring
        # reference_source().
        key = self.answer_key_item(language)
        if key is None:
            return ''
        return key.placeholder

    def reference_source(self, language=None):
        """
        Return the reference source code for the given language or None, if
        no reference is found.
        """
        key = self.answer_key_item(language)
        if key is None:
            return ''
        return key.source

    def run_code(self, source=None, iospec=None, language=None):
        """
        Run the given source code string for the programming language using
        the default IoSpec.

        If no code string is given, runs the reference source code, it it
        exists.
        """
        if language is None:
            language = self.answer_key_items.get().language
        key = self.answer_key_item(language)
        return key.run(source, iospec)

    def update_iospec_source(self):
        """
        Updates the iospec_source attribute with the current iospec object.

        Any modifications made to `self.iospec` must be saved explicitly to
        persist on the database.
        """
        if 'iospec' in self.__dict__:
            self.iospec_source = self.iospec.source()

    def register_response_item(self, source, language=None, **kwargs):
        response_data = {
            'language': self._language(language).ref,
            'source': source,
        }
        kwargs.update(response_data=response_data)
        return super().register_response_item(**kwargs)

    # Serving pages and routing
    @srvice.route(r'^submit-response/$')
    def respond_route(self, client, source=None, language=None, **kwargs):
        """
        Handles student responses via AJAX and a srvice program.
        """
        if not language:
            client.dialog('<p>Please select the correct language</p>')
            return

        # Bug with <ace-editor>? It sometimes sends this junk payload.
        if not source or source == '\x01\x01':
            client.dialog('<p>Internal error: please send it again!</p>')
            return

        language = programming_language(language)
        self.bind(client.request, language=language, **kwargs)
        response = self.register_response_item(source, autograde=True)
        html = render_html(response.feedback)
        client.dialog(html)

    @srvice.route(r'^placeholder/$')
    def get_placeholder_route(self, request, language):
        """
        Return the placeholder code for some language.
        """
        # BUGFIX: ``get_placehoder`` is not defined anywhere; the placeholder
        # lookup lives in .placeholder().
        return self.placeholder(language)

    def get_context(self, request, *args, **kwargs):
        context = super().get_context(request, *args, **kwargs)
        context['form'] = ResponseForm(request.POST)
        return context

    # Wagtail admin
    content_panels = Question.content_panels[:]
    content_panels.insert(
        -1,
        panels.MultiFieldPanel([
            panels.FieldPanel('iospec_size'),
            panels.FieldPanel('iospec_source'),
        ], heading=_('IoSpec definitions')))
    content_panels.insert(
        -1, panels.InlinePanel('answer_key_items', label=_('Answer keys')))
class CodingIoQuestion(Question):
    """
    CodeIo questions evaluate source code and judge them by checking if the
    inputs and corresponding outputs match an expected pattern.
    """

    class Meta:
        verbose_name = _('Programming question (IO-based)')
        verbose_name_plural = _('Programming questions (IO-based)')

    num_pre_tests = models.PositiveIntegerField(
        _('# of pre-test examples'),
        default=3,
        validators=[validators.positive_integer_validator],
        help_text=_(
            'The desired number of test cases that will be computed after '
            'comparing the iospec template with the correct answer. This is '
            'only a suggested value and will only be applied if the response '
            'template uses input commands to generate random input.'),
    )
    pre_tests_source = models.TextField(
        _('response template'),
        blank=True,
        validators=[validators.iospec_source_validator],
        help_text=_(
            'Template used to grade I/O responses. See '
            'http://pythonhosted.org/iospec for a complete reference on the '
            'template format.'),
    )
    num_post_tests = models.PositiveIntegerField(
        _('# of post-test examples'),
        validators=[validators.positive_integer_validator],
        default=20)
    post_tests_source = models.TextField(
        _('response template (post evaluation)'),
        validators=[validators.iospec_source_validator],
        blank=True,
        help_text=_(
            'These tests are used only in a second round of corrections and is '
            'not immediately shown to users.'),
    )
    test_state_hash = models.CharField(
        max_length=32,
        blank=True,
        help_text=_('A hash to keep track of iospec sources updates.'),
    )
    timeout = models.FloatField(
        _('timeout in seconds'),
        validators=[validators.timeout_validator],
        blank=True,
        default=1.0,
        help_text=_(
            'Defines the maximum runtime the grader will spend evaluating '
            'each test case.'),
    )
    default_placeholder = models.TextField(
        _('placeholder'),
        blank=True,
        help_text=_('Default placeholder message that is used if it is not '
                    'defined for the given language. This will appear as a '
                    'block of comment in the beginning of the submission.'))
    language = models.ForeignKey(
        ProgrammingLanguage,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
        help_text=_(
            'Programming language associated with question. Leave it blank in '
            'order to accept submissions in any programming language. This '
            'option should be set only for questions that tests specific '
            'programming languages constructs or require techniques that only '
            'make sense for specific programming languages.'),
    )

    # Fields for storing the results of an async post-validation.
    error_field = models.CharField(max_length=20, blank=True)
    error_message = models.TextField(blank=True)
    ignore_programming_errors = models.BooleanField(
        default=False,
        help_text=_(
            'Mark this if you want to ignore programming errors this time. It '
            'will ignore errors once, but you still have to fix the source '
            'of those errors to make the question become operational.'))

    __answers = ()
    _iospec_expansion_is_dirty = False

    @property
    def pre_tests(self):
        # Lazily parse and cache the pre-tests iospec source.
        try:
            return self._pre_tests
        except AttributeError:
            self._pre_tests = parse_iospec(self.pre_tests_source)
        return self._pre_tests

    @pre_tests.setter
    def pre_tests(self, value):
        self._pre_tests = value
        self.pre_tests_source = value.source()

    @pre_tests.deleter
    def pre_tests(self):
        try:
            del self._pre_tests
        except AttributeError:
            pass

    @property
    def post_tests(self):
        # Post tests are always combined with the pre tests.
        try:
            return self._post_tests
        except AttributeError:
            if self.post_tests_source:
                post_tests = parse_iospec(self.post_tests_source)
            else:
                post_tests = IoSpec()
            self._post_tests = ejudge.combine_iospec(self.pre_tests,
                                                     post_tests)
        return self._post_tests

    @post_tests.setter
    def post_tests(self, value):
        # Store only the tests that are not already part of the pre tests.
        pre_tests = self.pre_tests
        value = IoSpec([test for test in value if test not in pre_tests])
        self._post_tests = ejudge.combine_iospec(self.pre_tests, value)
        self.post_tests_source = value.source()

    @post_tests.deleter
    def post_tests(self):
        try:
            del self._post_tests
        except AttributeError:
            pass

    submission_class = CodingIoSubmission

    def load_post_file_data(self, file_data):
        """
        Load data from an uploaded file and mirror the tests sources into the
        fake POST payload.
        """
        fake_post = super().load_post_file_data(file_data)
        fake_post['pre_tests_source'] = self.pre_tests_source
        fake_post['post_tests_source'] = self.post_tests_source
        return fake_post

    # Expanding and controlling the tests state

    def has_test_state_changed(self):
        """
        Return True if test state has changed.
        """
        # BUGFIX: the stored hash matching the freshly computed hash means the
        # state is up to date; the original ``==`` comparison reported
        # "changed" exactly when nothing had changed (and .clean() relies on
        # this to decide whether to schedule validation).
        return self.test_state_hash != compute_test_state_hash(self)

    def get_current_test_state(self, update=False):
        """
        Return a current TestState object synchronized with the current
        pre and post tests.

        It raises a ValidationError if an error is encountered during the
        recreation of the test state.
        """
        if update:
            state_hash = compute_test_state_hash(self)
        else:
            state_hash = self.test_state_hash

        try:
            return TestState.objects.get(question=self, hash=state_hash)
        except TestState.DoesNotExist:
            pre_tests = self.pre_tests
            post_tests = self.post_tests

            def expand(x):
                # Expand the template and verify the expansion against every
                # answer key program.
                result = expand_tests(self, x)
                check_expansions_with_all_programs(self, result)
                return result

            pre_source = expand(pre_tests).source()
            post_source = expand(post_tests).source()
            return TestState.objects.create(
                question=self,
                hash=state_hash,
                pre_tests_source=self.pre_tests_source,
                post_tests_source=self.post_tests_source,
                pre_tests_source_expansion=pre_source,
                post_tests_source_expansion=post_source,
            )

    def get_expanded_pre_tests(self):
        """
        Return an IoSpec object with the result of pre tests expansions.
        """
        state = self.get_current_test_state()
        source = state.pre_tests_source_expansion
        return parse_iospec(source)

    def get_expand_post_tests(self):
        """
        Return an IoSpec object with the result of post tests expansions.
        """
        state = self.get_current_test_state()
        source = state.post_tests_source_expansion
        return parse_iospec(source)

    def __expand_tests_to_source(self, tests):
        """
        Return the source of a iospec object full expansion.

        Similar to .expand_tests(), but return a string with the source code
        expansion.
        """
        if tests is None:
            return ''
        # NOTE(review): ``_expand_tests`` is not defined in this class —
        # confirm it exists on a base class or fix this call.
        return self._expand_tests(tests)

    # Code runners

    def check_with_code(self, source, tests, language=None, timeout=None):
        """
        Wrapped version of check_with_code() that uses question's own timeout
        and language as default.
        """
        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        ejudge.check_with_code(source, tests, language, timeout)

    def run_code(self, source, tests, language=None, timeout=None):
        """
        Wrapped version of run_code() that uses question's own timeout and
        language as default.
        """
        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        return ejudge.run_code(source, tests, language, timeout)

    def grade_code(self, source, inputs, language=None, timeout=None):
        """
        Wrapped version of grade_code() that uses question's own timeout and
        language as default.
        """
        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        return ejudge.grade_code(source, inputs, language, timeout)

    def expand_from_code(self, source, inputs, language=None, timeout=None):
        """
        Wrapped version of expand_from_code() that uses question's own
        timeout and language as default.
        """
        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        return ejudge.expand_from_code(source, inputs, language, timeout)

    # Saving & validation

    def save(self, *args, **kwargs):
        # Keep the stored hash in sync with the saved sources.
        self.test_state_hash = compute_test_state_hash(self)

        if not self.author_name and self.owner:
            name = self.owner.get_full_name() or self.owner.username
            email = self.owner.email
            self.author_name = '%s <%s>' % (name, email)

        super().save(*args, **kwargs)

    def clean(self):
        super().clean()

        if self.has_test_state_changed() or self.has_code_changed():
            logger.debug('%r: recomputing tests' % self.title)
            self.schedule_validation()

    def full_clean(self, *args, **kwargs):
        if self.__answers:
            self.answers = self.__answers
        super().full_clean(*args, **kwargs)

    def full_clean_expansions(self):
        self.get_current_test_state(update=True)

    def full_clean_answer_keys(self):
        """
        Performs a full_clean() validation step on all answer key objects.
        """
        for key in self.answers.all():
            try:
                key.question = self
                key.full_clean()
            except ValidationError as ex:
                raise validators.invalid_related_answer_key_error(key, ex)

    def full_clean_all(self, *args, **kwargs):
        self.full_clean(*args, **kwargs)
        self.full_clean_answer_keys()
        self.full_clean_expansions()

    def schedule_validation(self):
        """
        Schedule full validation to be performed in the background.

        This executes the full_clean_code() method
        """
        print('scheduling full code validation... (we are now executing on the'
              'foreground).')
        self.mark_invalid_code_fields()

    def mark_invalid_code_fields(self):
        """
        Performs a full code validation with .full_clean_code() and marks all
        errors found in the question.
        """
        # NOTE(review): this early return disables everything below — confirm
        # whether this is a temporary stub or dead code to be removed.
        return
        try:
            self.full_clean(force_expansions=True)
        except ValidationError as ex:
            print(ex)
            print(dir(ex))
            raise

    def validate_tests(self):
        """
        Triggered when (pre|post)_test_source changes or on the first time the
        .clean() method is called.
        """
        # Check if new source is valid
        for attr in ['pre_tests_source', 'post_tests_source']:
            try:
                source = getattr(self, attr)
                if source:
                    iospec = parse_iospec(source)
                else:
                    iospec = None
                setattr(self, attr[:-7], iospec)
            except Exception as ex:
                self.clear_tests()
                raise ValidationError(
                    {attr: _('invalid iospec syntax: %s' % ex)})

        # Computes temporary expansions for all sources. A second step may be
        # required in which we use the reference source in answer key to
        # further expand iospec data structures
        # NOTE(review): ``number_of_pre_expansions`` and
        # ``number_of_post_expansions`` are not defined in this class; confirm
        # they exist upstream (or should be num_pre_tests/num_post_tests).
        iospec = self.pre_tests.copy()
        iospec.expand_inputs(self.number_of_pre_expansions)
        self.pre_tests_expanded = iospec

        if self.pre_tests_source and self.post_tests_source:
            iospec = ejudge.combine_iospecs(self.pre_tests, self.post_tests)
        elif self.post_tests_source:
            iospec = self.post_tests.copy()
        elif self.pre_tests_source:
            iospec = self.pre_tests.copy()
        else:
            raise ValidationError(
                _('either pre_tests_source or post_tests_source must be given!'
                  ))
        iospec.expand_inputs(self.number_of_post_expansions)
        # assert len(iospec) >= self.number_of_expansions, iospec
        self.post_tests_expanded = iospec

        if self.pre_tests_expanded.is_expanded and \
                self.post_tests_expanded.is_expanded:
            self.pre_tests_expanded_source = self.pre_tests_expanded.source()
            self.post_tests_expanded_source = \
                self.post_tests_expanded.source()
        else:
            self._expand_from_answer_keys()

        # Iospec is valid: save the hash
        # NOTE(review): the model field is ``test_state_hash`` —
        # ``tests_state_hash`` / ``current_tests_hash`` look stale; verify.
        self.tests_state_hash = self.current_tests_hash

    def _expand_from_answer_keys(self):
        # If the source requires expansion, we have to check all answer keys
        # to see if one of them defines a valid source and compute the
        # expansion from this source. All languages must produce the same
        # expansion, otherwise it is considered to be an error.
        #
        # If no answer key is available, leave pre_tests_expanded_source blank
        assert self.pre_tests_expanded is not None
        assert self.post_tests_expanded is not None
        pre, post = self.pre_tests_expanded, self.post_tests_expanded
        useful_keys = list(self.answers_with_code())

        if useful_keys:
            ex_pre = pre.copy()
            ex_pre.expand_inputs(self.number_of_pre_expansions)
            ex_post = post.copy()
            ex_post.expand_inputs(self.number_of_post_expansions)
            pre_list = self.answers.expand_all(ex_pre)
            post_list = self.answers.expand_all(ex_post)

            if len(pre_list) == len(post_list) == 1:
                ex_pre = pre_list[0]
                ex_post = post_list[0]
            else:
                def validate(L, field):
                    # All languages must produce identical expansions; report
                    # the first mismatching pair.
                    first, *tail = L
                    for i, elem in enumerate(tail, 1):
                        if first == elem:
                            continue
                        lang1 = useful_keys[0].language
                        lang2 = useful_keys[i].language
                        first.language = lang1
                        elem.language = lang2
                        self.clear_tests()
                        raise validators.inconsistent_testcase_error(
                            first, elem, field)

                validate(pre_list, 'pre_tests_expanded_source')
                validate(post_list, 'post_tests_expanded_source')
                ex_pre, ex_post = pre_list[0], post_list[0]

            # Update values
            self.pre_tests_expanded = ex_pre
            self.pre_tests_expanded_source = ex_pre.source()
            # BUGFIX: the post expansion was assigned ``ex_pre``, silently
            # discarding the computed post tests.
            self.post_tests_expanded = ex_post
            self.post_tests_expanded_source = ex_post.source()

    # Data access

    def get_placeholder(self, language=None):
        """
        Return the placeholder text for the given language.
        """
        key = self.answers.get(language or self.language, None)
        if key is None:
            return self.default_placeholder
        return key.placeholder

    def get_reference_source(self, language=None):
        """
        Return the reference source code for the given language or None, if
        no reference is found.
        """
        if language is None:
            language = self.language
        qs = self.answers.all().filter(
            language=get_programming_language(language))
        if qs:
            return qs.get().source
        return ''

    def get_submission_kwargs(self, request, kwargs):
        return dict(language=kwargs['language'], source=kwargs['source'])

    # Access answer key queryset

    def answers_with_code(self):
        """
        Filter only answers that define a program.
        """
        return self.answers.exclude(source='')

    def has_code_changed(self):
        """
        True if some answer source for a valid code has changed.
        """
        keys = self.answers_with_code()
        for key in keys:
            if key.has_changed_source():
                return True
        return False

    # Actions

    def submit(self, user_or_request, language=None, **kwargs):
        # A question bound to a language only accepts that language.
        if language and self.language:
            if language != self.language:
                args = language, self.language
                raise ValueError('cannot set language: %r != %r' % args)
        if self.language:
            language = self.language
        language = get_programming_language(language)
        return super().submit(user_or_request, language=language, **kwargs)

    def run_post_grading(self, **kwargs):
        """
        Runs post tests for all submissions made to this question.
        """
        for response in self.responses.all():
            response.run_post_grading(tests=self.post_tests_expanded,
                                      **kwargs)
        self.closed = True
        self.save()

    def nav_section_for_activity(self, request):
        # NOTE(review): ``url('submissions')`` passes an argument to
        # get_absolute_url — confirm its signature accepts a sub-page name.
        url = self.get_absolute_url
        section = NavSection(__('Question'), url(),
                             title=__('Back to question'))
        if self.rules.test(request.user, 'activities.edit_activity'):
            section.add_link(__('Edit'), self.get_admin_url(),
                             title=__('Edit question'))
        section.add_link(__('Submissions'), url('submissions'),
                         title=__('View your submissions'))
        return section

    # Serving pages and routing

    template = 'questions/coding_io/detail.jinja2'
    template_submissions = 'questions/coding_io/submissions.jinja2'
    template_statistics = 'questions/coding_io/statistics.jinja2'
    template_debug = 'questions/coding_io/debug.jinja2'

    def get_context(self, request, *args, **kwargs):
        context = dict(super().get_context(request, *args, **kwargs),
                       form=True)

        # Select default mode for the ace editor
        if self.language:
            context['default_mode'] = self.language.ace_mode()
        else:
            context['default_mode'] = get_config(
                'CODESCHOOL_DEFAULT_ACE_MODE', 'python')

        # Enable language selection
        if self.language is None:
            context['select_language'] = True
            context['languages'] = ProgrammingLanguage.supported.all()
        else:
            context['select_language'] = False

        return context

    def serve_ajax_submission(self, client, source=None, language=None,
                              **kwargs):
        """
        Handles student responses via AJAX and a srvice program.
        """
        # User must choose language
        if not language or language == '-----':
            if self.language is None:
                fmt = _('Error'), _('Please select the correct language')
                client.dialog(
                    '<p class="dialog-text"><h2>%s</h2><p>%s</p></p>' % fmt)
                return None
            language = self.language
        else:
            language = get_programming_language(language)

        return super().serve_ajax_submission(
            client=client,
            language=language,
            source=source,
        )

    @srvice.route(r'^placeholder/$')
    def route_placeholder(self, request, language):
        """
        Return the placeholder code for some language.
        """
        # BUGFIX: typo ``get_placehoder`` raised AttributeError at runtime;
        # the method defined above is get_placeholder().
        return self.get_placeholder(language)

    #
    # Actions
    #
    def regrade_post(self):
        """
        Regrade all submissions using the post tests.
        """
        self.responses.regrade_with(self.post_tests_expanded)

    def action_expand_tests(self, client, *args, **kwargs):
        self._expand_tests()
        pre = escape(self.pre_tests_expanded_source)
        post = escape(self.post_tests_expanded_source)
        client.dialog('<h2>Pre-tests</h2><pre>%s</pre>'
                      '<h2>Post-test</h2><pre>%s</pre>' % (pre, post))

    def action_grade_with_post_tests(self, client, *args, **kwargs):
        self.regrade_post()
        client.dialog('<p>Successful operation!</p>')
class Course(models.DateFramedModel, models.TimeStampedModel):
    """
    One specific occurrence of a course for a given teacher in a given
    period.
    """

    discipline = models.ForeignKey(Discipline, related_name='courses')
    teacher = models.ForeignKey(models.User, related_name='owned_courses')
    students = models.ManyToManyField(
        models.User,
        related_name='enrolled_courses',
        blank=True,
    )
    staff = models.ManyToManyField(
        models.User,
        related_name='courses_as_staff',
        blank=True,
    )
    current_lesson_index = models.PositiveIntegerField(default=0, blank=True)
    current_lesson_start = models.DateField(blank=True, null=True)
    is_active = models.BooleanField(_('is active'), default=False)

    objects = CourseQueryset.as_manager()

    # Discipline properties
    name = property(lambda x: x.discipline.name)
    short_description = property(lambda x: x.discipline.short_description)
    long_description = property(lambda x: x.discipline.long_description)
    short_description_html = property(
        lambda x: x.discipline.short_description_html)
    long_description_html = property(
        lambda x: x.discipline.long_description_html)

    # Other properties
    owner = property(lambda x: x.teacher)

    def __str__(self):
        return '%s (%s)' % (self.discipline.name, self.teacher.first_name)

    def to_file(self):
        """Serialize object in a Markdown format."""

    @classmethod
    def from_file(cls, file):
        """Load course from file."""

    def register_student(self, student):
        """
        Register a new student in the course.
        """
        self.students.add(student)
        self.update_friendship_status(student)

    def update_friendship_status(self, student=None):
        """
        Recompute the friendship status for a single student by marking it as
        a colleague of all participants in the course.

        If no student is given, update the status of all enrolled students.
        """
        update = self._update_friendship_status
        if student is None:
            for student in self.students.all():
                update(student)
        else:
            update(student)

    def _update_friendship_status(self, student):
        # Worker function for update_friendship_status.
        # NOTE(review): only the (student -> colleague) direction is created;
        # confirm whether the reverse record is created elsewhere.
        colleague_status = FriendshipStatus.STATUS_COLLEAGUE
        for colleague in self.students.all():
            if colleague != student:
                try:
                    FriendshipStatus.objects.get(owner=student,
                                                 other=colleague)
                except FriendshipStatus.DoesNotExist:
                    FriendshipStatus.objects.create(owner=student,
                                                    other=colleague,
                                                    status=colleague_status)

    # Managers
    @property
    def past_activities(self):
        """Activities that were closed or whose end date has passed."""
        return (self.activities.filter(status=Activity.STATUS_CLOSED) |
                self.activities.filter(
                    end__lt=timezone.now())).select_subclasses()

    @property
    def open_activities(self):
        """Open activities that are inside their time frame."""
        return (self.activities.timeframed.all() & self.activities.filter(
            status=Activity.STATUS_OPEN)).select_subclasses()

    @property
    def pending_activities(self):
        """Draft activities plus open activities already past their end."""
        return (self.activities.filter(status=Activity.STATUS_DRAFT) |
                (self.activities.filter(status=Activity.STATUS_OPEN) &
                 self.activities.filter(end__lt=timezone.now()))
                ).select_subclasses()

    def get_absolute_url(self):
        return url_reverse('course-detail', args=(self.pk,))

    def get_user_role(self, user):
        """Return a string describing the most privileged role the user has
        in the course. The possible values are:

        teacher:
            Owns the course and can do any kind of administrative tasks in
            the course.
        staff:
            Teacher assistants. May have some privileges granted by the
            teacher.
        student:
            Enrolled students.
        visitor:
            Have no relation to the course. If course is marked as public,
            visitors can access the course contents.
        """
        if user == self.teacher:
            return 'teacher'
        if user in self.staff.all():
            return 'staff'
        if user in self.students.all():
            return 'student'
        return 'visitor'

    def get_user_activities(self, user):
        """
        Return a sequence of all activities that are still open for the user.
        """
        activities = self.activities.filter(status=Activity.STATUS_OPEN)
        return activities.select_subclasses()

    def activity_duration(self):
        """
        Return the default duration (in minutes) for an activity starting
        from now.
        """
        return 120

    def next_time_slot(self):
        """Return the start and end times for the next class in the course.

        If a time slot is currently open, return it."""
        now = timezone.now()
        # BUGFIX: a bare positional argument to timedelta() is interpreted as
        # *days*; activity_duration() is documented to return minutes.
        duration = timezone.timedelta(minutes=self.activity_duration())
        return now, now + duration

    def next_date(self, date=None):
        """Return the date of the next available time slot."""

    def can_view(self, user):
        return True

    def can_edit(self, user):
        return user == self.teacher