def __init__(self, *args, **kwargs):
    # Pop the custom kwarg so it is not forwarded to Django's ForeignKey,
    # which does not accept a 'queryset' argument.
    self.queryset = kwargs.pop('queryset', None)
    ForeignKey.__init__(self, *args, **kwargs)
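# Hedged usage sketch (assumption, not taken from the original source): the
# __init__ above looks like part of a ForeignKey subclass that remembers an
# optional queryset (e.g. to restrict form choices later). The class and
# model names below are hypothetical.
#
#     class QuerysetForeignKey(ForeignKey):
#         __init__ = ...  # as above
#
#     class Ticket(Model):
#         assignee = QuerysetForeignKey(
#             User, on_delete=CASCADE,
#             queryset=User.objects.filter(is_active=True))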
class MessageOptions(GmailOptions):
    app_label = 'mailchecker'
    model_name = 'message'
    verbose_name = 'message'
    verbose_name_raw = 'message'
    verbose_name_plural = 'messages'
    object_name = 'message'
    default_related_name = None
    _gmail_pk_field = 'id'
    _gmail_fields = {
        'id': GmailAutoField(),
        'receiver': CharField(max_length=200),
        'sender': CharField(max_length=200),
        'snippet': CharField(max_length=200),
        'body': TextField(),
    }

    def _bind(self):
        super(MessageOptions, self)._bind()
        from .models import Thread, Message
        self.thread = ForeignKey(Thread)
        self.thread.contribute_to_class(Thread, 'thread')
        self.concrete_model = Message
        self._gmail_other_fields['thread'] = self.thread
def contribute_to_class(self, cls, name):
    self.name = name
    self.fk_field_name = name + '_fk'
    self.ft_field_name = name + '_ft'
    setattr(cls, name, self)
    fk_field = ForeignKey(self.foreign_model, blank=True, null=True)
    fk_field.contribute_to_class(cls, self.fk_field_name)
    ft_field = CharField(max_length=255, blank=True)
    ft_field.contribute_to_class(cls, self.ft_field_name)
def contribute_to_class(self, cls, name):
    self.name = name
    self.fk_field_name = name + '_fk'
    self.ft_field_name = name + '_ft'
    setattr(cls, name, self)
    fk_kwargs = dict(blank=True, null=True)
    if self.related_name:
        fk_kwargs['related_name'] = self.related_name
    fk_field = ForeignKey(self.foreign_model, **fk_kwargs)
    fk_field.contribute_to_class(cls, self.fk_field_name)
    ft_field = CharField(max_length=255, blank=True, null=True, default='')
    ft_field.contribute_to_class(cls, self.ft_field_name)
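# Hedged usage sketch (assumption, not taken from the original source): this
# contribute_to_class appears to belong to a composite "FK plus free text"
# field that shadows itself with two concrete columns. The class, model and
# field names below are hypothetical.
#
#     class FreeTextForeignKey(object):
#         def __init__(self, foreign_model, related_name=None):
#             self.foreign_model = foreign_model
#             self.related_name = related_name
#         # contribute_to_class as above
#
#     class Book(Model):
#         author = FreeTextForeignKey(Person, related_name='books')
#
# After Django prepares the Book class it is expected to carry:
#     Book.author  -> the descriptor installed via setattr(cls, name, self)
#     author_fk    -> ForeignKey(Person, blank=True, null=True, related_name='books')
#     author_ft    -> CharField(max_length=255, blank=True, null=True, default='')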
def __init__(self, field_name):
    # Compute the attributes of the generated model class.
    attributes = {
        '__module__': __name__,
        'key': CharField(max_length=255),
        'value': PositiveIntegerField(default=0),
    }
    self.dbclass = type.__new__(type, "Stats%s" % field_name, (Model,), attributes)
    ForeignKey.__init__(self, self.dbclass)
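# Hedged usage sketch (assumption, not taken from the original source): this
# __init__ seems to belong to a ForeignKey subclass that builds a dedicated
# "Stats<field_name>" model on the fly and points at it. The names below are
# hypothetical.
#
#     class StatsForeignKey(ForeignKey):
#         __init__ = ...  # as above
#
#     class Article(Model):
#         counters = StatsForeignKey('Article')  # targets a generated StatsArticle model
#
# Each generated model carries a CharField 'key' and a PositiveIntegerField
# 'value', so arbitrary named counters can be stored as rows reached through
# this foreign key.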
def _bind(self):
    super()._bind()
    from .models import Image, Instance
    self.thread = ForeignKey(Image)
    self.thread.contribute_to_class(Image, 'ami')
    self.concrete_model = Instance
    self._aws_other_fields['thread'] = self.thread
class PersonTopic(Orderable):
    person = ParentalKey("Person", related_name="topics")
    topic = ForeignKey("topics.Topic", on_delete=CASCADE, related_name="+")

    panels = [PageChooserPanel("topic")]
class Category(Model):
    """
    Model for a category.

    Fields
    ------
    name : CharField
        Category name
    isIncome : BooleanField
        True for income, False for expense
    writer : User
        Creator of the category

    Properties (Read Only)
    ----------------------
    categoryName : CharField
        Same as name
    """
    # Fields
    name = CharField(max_length=128)
    isIncome = BooleanField()
    # When the owning user is deleted, this category is deleted with it (CASCADE).
    writer = ForeignKey(User, on_delete=CASCADE)

    class Meta:
        # Keep one user from having duplicate categories with the same name.
        unique_together = ('name', 'writer', 'isIncome')

    # Properties
    @property
    def categoryName(self):
        return self.name

    # Public methods
    def __str__(self):
        return self.name

    def setName(self, name: str) -> Optional[Model]:
        """
        Set the category name and return the category itself.
        If name is blank, nothing is set and None is returned.

        Parameters
        ----------
        name : str
            The new category name

        Returns
        -------
        category : Category or None
            This category object if the name was set, otherwise None
        """
        category = self if name else None
        if category:
            category.name = name
        return category
class ScriptSet(CleanSave, Model):

    class Meta(DefaultMeta):
        """Needed for South to recognize this model."""

    objects = ScriptSetManager()

    last_ping = DateTimeField(blank=True, null=True)

    node = ForeignKey('maasserver.Node', on_delete=CASCADE)

    result_type = IntegerField(
        choices=RESULT_TYPE_CHOICES, editable=False,
        default=RESULT_TYPE.COMMISSIONING)

    power_state_before_transition = CharField(
        max_length=10, null=False, blank=False,
        choices=POWER_STATE_CHOICES, default=POWER_STATE.UNKNOWN,
        editable=False)

    def __str__(self):
        return "%s/%s" % (self.node.system_id, self.result_type_name)

    def __iter__(self):
        for script_result in self.scriptresult_set.all():
            yield script_result

    @property
    def result_type_name(self):
        return RESULT_TYPE_CHOICES[self.result_type][1]

    @property
    def status(self):
        qs = self.scriptresult_set.all()
        # The status order below represents the order of precedence.
        for status in (SCRIPT_STATUS.RUNNING, SCRIPT_STATUS.INSTALLING,
                       SCRIPT_STATUS.PENDING, SCRIPT_STATUS.ABORTED,
                       SCRIPT_STATUS.FAILED, SCRIPT_STATUS.FAILED_INSTALLING,
                       SCRIPT_STATUS.TIMEDOUT, SCRIPT_STATUS.DEGRADED):
            for script_result in qs:
                if script_result.status == status:
                    if status == SCRIPT_STATUS.INSTALLING:
                        # When a script is installing the script set is
                        # running.
                        return SCRIPT_STATUS.RUNNING
                    elif status == SCRIPT_STATUS.TIMEDOUT:
                        # A timeout causes the node to go into a failed status
                        # so show the ScriptSet as failed.
                        return SCRIPT_STATUS.FAILED
                    elif status == SCRIPT_STATUS.FAILED_INSTALLING:
                        # Installation failure causes the node to go into a
                        # failed status so show the ScriptSet as failed.
                        return SCRIPT_STATUS.FAILED
                    else:
                        return status
        return SCRIPT_STATUS.PASSED

    @property
    def status_name(self):
        return SCRIPT_STATUS_CHOICES[self.status][1]

    @property
    def started(self):
        qs = self.scriptresult_set.all()
        if qs.exists():
            return qs.earliest('started').started
        else:
            return None

    @property
    def ended(self):
        qs = self.scriptresult_set.all()
        if not qs.exists():
            return None
        elif qs.filter(ended=None).exists():
            return None
        else:
            return qs.latest('ended').ended

    @property
    def runtime(self):
        if None not in (self.ended, self.started):
            runtime = self.ended - self.started
            return str(runtime - timedelta(microseconds=runtime.microseconds))
        else:
            return ''

    def find_script_result(self, script_result_id=None, script_name=None):
        """Find a script result in the current set."""
        if script_result_id is not None:
            try:
                return self.scriptresult_set.get(id=script_result_id)
            except ObjectDoesNotExist:
                pass
        else:
            for script_result in self:
                if script_result.name == script_name:
                    return script_result
        return None

    def regenerate(self):
        """Regenerate any ScriptResult which has a storage parameter.

        Deletes and recreates ScriptResults for any ScriptResult which has a
        storage parameter. Used after commissioning has completed when there
        are tests to be run.
        """
        # Avoid circular dependencies.
        from metadataserver.models import ScriptResult
        regenerate_scripts = {}
        for script_result in self.scriptresult_set.filter(
                status=SCRIPT_STATUS.PENDING).exclude(parameters={}):
            # If there are multiple storage devices on the system, every
            # script which contains a storage type parameter will have one
            # ScriptResult per device. If we already know a script must be
            # regenerated it can be deleted, as the device the ScriptResult
            # is for may no longer exist. Regeneration below will generate
            # ScriptResults for each existing storage device.
            if script_result.script in regenerate_scripts:
                script_result.delete()
                continue
            # Check if the ScriptResult contains any storage type parameter.
            # If so, remove the value of the storage parameter only and add
            # it to the list of Scripts which must be regenerated.
            for param_name, param in script_result.parameters.items():
                if param['type'] == 'storage':
                    # Remove the storage parameter as the storage device may
                    # no longer exist. The ParametersForm will set the default
                    # value (all).
                    script_result.parameters.pop(param_name)
                    regenerate_scripts[
                        script_result.script] = script_result.parameters
                    script_result.delete()
                    break

        for script, params in regenerate_scripts.items():
            form = ParametersForm(data=params, script=script, node=self.node)
            if not form.is_valid():
                err_msg = (
                    "Removing Script %s from ScriptSet due to regeneration "
                    "error - %s" % (script.name, dict(form.errors)))
                logger.error(err_msg)
                Event.objects.create_node_event(
                    system_id=self.node.system_id,
                    event_type=EVENT_TYPES.SCRIPT_RESULT_ERROR,
                    event_description=err_msg)
                continue
            for i in form.cleaned_data['input']:
                ScriptResult.objects.create(
                    script_set=self, status=SCRIPT_STATUS.PENDING,
                    script=script, script_name=script.name, parameters=i)

    def delete(self, force=False, *args, **kwargs):
        if not force and self in {
                self.node.current_commissioning_script_set,
                self.node.current_installation_script_set}:
            # Don't allow deleting the current_commissioning_script_set as it
            # is the data set MAAS used to gather hardware information about
            # the node. The current_installation_script_set is only set when
            # a node is deployed. Don't allow it to be deleted as it contains
            # info about the OS deployed.
            raise ValidationError(
                'Unable to delete the current %s script set for node: %s' % (
                    self.result_type_name.lower(), self.node.fqdn))
        elif self == self.node.current_testing_script_set:
            # MAAS uses the current_testing_script_set to know what testing
            # script set should be shown by default in the UI. If an older
            # version exists set the current_testing_script_set to it.
            try:
                previous_script_set = self.node.scriptset_set.filter(
                    result_type=RESULT_TYPE.TESTING)
                previous_script_set = previous_script_set.exclude(id=self.id)
                previous_script_set = previous_script_set.latest('id')
            except ScriptSet.DoesNotExist:
                pass
            else:
                self.node.current_testing_script_set = previous_script_set
                self.node.save(update_fields=['current_testing_script_set'])
        return super().delete(*args, **kwargs)
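# Illustrative note (not from the original source): ScriptSet.status walks the
# precedence list above and returns the first status that any result matches,
# so a set containing one PENDING and one FAILED result reports PENDING, while
# a set whose only notable result is TIMEDOUT reports FAILED (TIMEDOUT is
# mapped to FAILED). Only when no result matches any listed status does the
# property fall through to PASSED.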
class EventPhoto(Photo):
    event = ForeignKey(Event, related_name='photos')
class TopicRef(Model):
    topic = ForeignKey(Topic, on_delete=CASCADE, related_name='references')
    qId = CharField(max_length=20)
class BulkManagerTestModel(Model):
    parent = ForeignKey("BulkManagerParentTestModel", editable=False, on_delete=CASCADE)
    objects = BulkManager()
def __init__(self, **kwargs):
    kwargs.setdefault('to', settings.AUTH_USER_MODEL)
    kwargs.setdefault('null', True)
    kwargs.setdefault('blank', True)
    ForeignKey.__init__(self, **kwargs)
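# Hedged usage sketch (assumption, not taken from the original source): this
# __init__ reads like the body of a "user foreign key" field that defaults to
# the configured user model with null=True/blank=True. The class and model
# names below are hypothetical.
#
#     class UserForeignKey(ForeignKey):
#         __init__ = ...  # as above
#
#     class Comment(Model):
#         # equivalent to ForeignKey(settings.AUTH_USER_MODEL, null=True,
#         # blank=True, on_delete=SET_NULL); explicit kwargs still override
#         # the setdefault() values
#         author = UserForeignKey(on_delete=SET_NULL)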
class MessageHistory(Model):
    time = BigIntegerField(default=get_milliseconds)
    message = ForeignKey('Message', CASCADE, null=False)
    content = TextField(null=True, blank=True)
    giphy = URLField(null=True, blank=True)
class Template(CleanSave, TimestampedModel):
    """A generic `Template` object, with a link to a default version and an
    optional custom version.

    :ivar filename: The filename of this template (/etc/mass/templates/X)
    :ivar default_version: The default value for this template. Useful if the
        template needs to be reset after an accidental edit. MAAS is
        responsible for the default_version; the user is not allowed to set
        it.
    :ivar version: The current in-use version of this template. If this
        exists, the specified template will be used (rather than the
        default). If not specified, the default_version will be used.
    """

    class Meta(DefaultMeta):
        """Needed for South to recognize this model."""
        verbose_name = "Template"
        verbose_name_plural = "Templates"

    objects = TemplateManager()

    filename = CharField(
        editable=True, max_length=64, blank=False, null=False, unique=True,
        help_text="Template filename")

    default_version = ForeignKey(
        "VersionedTextFile", on_delete=CASCADE, editable=False, blank=False,
        null=False, related_name="default_templates",
        help_text="Default data for this template.")

    version = ForeignKey(
        "VersionedTextFile", on_delete=CASCADE, editable=True, blank=True,
        null=True, related_name="templates",
        help_text="Custom data for this template.")

    @property
    def value(self):
        if self.version is not None:
            return self.version.data
        else:
            return self.default_version.data

    @property
    def default_value(self):
        return self.default_version.data

    @property
    def is_default(self):
        return self.version is None

    def revert(self, verbosity=0, stdout=sys.stdout):
        self.version = None
        self.save()
        if verbosity > 0:
            stdout.write(
                "Reverted template to default: %s\n" % self.filename)

    def update(self, new_text, comment=None):
        if self.version is None:
            version = self.default_version
        else:
            version = self.version
        self.version = version.update(new_text, comment)

    def delete(self, *args, **kwargs):
        if self.default_version is not None:
            # By deleting the oldest version, the deletion should cascade
            # to all other versions of the file.
            self.default_version.get_oldest_version().delete(*args, **kwargs)
        return super().delete(*args, **kwargs)
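# Hedged usage sketch (assumption, not taken from the original source): a
# typical edit-and-reset flow against the Template model above. The filename
# is hypothetical, and the explicit save() after update() is an assumption
# about how callers persist the new version.
#
#     template = Template.objects.get(filename="dhcpd.conf.template")
#     template.update("new template body", comment="tweak lease options")
#     template.save()
#     # expected: template.is_default is False, template.value is the new body
#
#     template.revert()   # clears the custom version and saves immediately
#     # expected: template.is_default is True and template.value falls back
#     # to template.default_value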
class Profile(models.Model):

    def __unicode__(self):
        return unicode(self.user)

    def get_absolute_url(self):
        return reverse('competition_detail', kwargs={'pk': str(self.pk)})

    user = models.OneToOneField(User)
    feature_level = IntegerField(choices=FEATURE_LEVELS, default=1)
    managed_profiles = models.ManyToManyField('Profile', related_name='managers', null=True, blank=True)
    # managed_users = models.ManyToManyField(User, related_name='managers', null=True, blank=True)
    # first_competition = models.ForeignKey(Competition, null=True, blank=True)
    # registration_code = CodeField(null=True, blank=True)
    created_codes = ManyToManyField(Code, null=True, blank=True, related_name='creator_set')
    received_codes = ManyToManyField(Code, null=True, blank=True, related_name='recipient_set')
    used_codes = ManyToManyField(Code, null=True, blank=True, related_name='user_set')
    question_sets = ManyToManyField(QuestionSet, null=True, blank=True)
    questions = ManyToManyField(Question, null=True, blank=True)
    merged_with = ForeignKey('Profile', null=True, blank=True, related_name='former_profile_set')
    # merged_with = ForeignKey(User, null=True, blank=True, related_name='merged_set')
    update_used_codes_timestamp = DateTimeField(null=True, blank=True)
    update_managers_timestamp = DateTimeField(null=True, blank=True)
    vcard = models.TextField(blank=True)

    @property
    def first_name(self):
        return self.user.first_name

    @property
    def last_name(self):
        return self.user.last_name

    @property
    def email(self):
        return self.user.email

    @property
    def username(self):
        return self.user.username

    def __superiors(self, codegen, known):
        for c in self.received_codes.filter(format=codegen.format, salt=codegen.salt):
            for o in c.owner_set.all():
                if o not in known:
                    # Recurse into the owner's superiors (the original called
                    # an undefined global `superiors`; recursion is the
                    # presumed intent).
                    s1 = o.__superiors(codegen, known)
                    known = s1.union(known)
                    known.add(o)
        return known

    def update_used_codes(self):
        if self.update_used_codes_timestamp is None:
            attempts = self.attempt_set.all()
        else:
            attempts = self.attempt_set.filter(
                start__gte=self.update_used_codes_timestamp)
        self.update_used_codes_timestamp = timezone.now()
        for a in attempts:
            try:
                codegen = a.competitionquestionset.competition.competitor_code_generator
                codes = Code.objects.filter(value=a.access_code,
                                            salt=codegen.salt,
                                            format=codegen.format)
                for c in codes:
                    # The original referenced a bare `used_codes`; the m2m
                    # field on this profile is what gets updated.
                    self.used_codes.add(c)
            except Exception as e:
                print(e)
class Auteur(CommonModel):
    # A database constraint exists in the migrations to keep both of these
    # fields from being filled in at the same time.
    oeuvre = ForeignKey(
        'Oeuvre', null=True, blank=True, related_name='auteurs',
        verbose_name=_('œuvre'), on_delete=CASCADE)
    source = ForeignKey(
        'Source', null=True, blank=True, related_name='auteurs',
        verbose_name=_('source'), on_delete=CASCADE)

    # A database constraint exists in the migrations to keep both of these
    # fields from being filled in at the same time.
    individu = ForeignKey(
        'Individu', related_name='auteurs', null=True, blank=True,
        verbose_name=_('individu'), on_delete=PROTECT)
    ensemble = ForeignKey(
        'Ensemble', related_name='auteurs', null=True, blank=True,
        verbose_name=_('ensemble'), on_delete=PROTECT)
    profession = ForeignKey(
        'Profession', related_name='auteurs', null=True, blank=True,
        verbose_name=_('profession'), on_delete=PROTECT)

    objects = AuteurManager()

    class Meta(object):
        verbose_name = _('auteur')
        verbose_name_plural = _('auteurs')
        ordering = ('profession', 'ensemble', 'individu')

    @staticmethod
    def invalidated_relations_when_saved(all_relations=False):
        return (
            'oeuvre', 'source',
        )

    def html(self, tags=True):
        return force_text(AuteurBiGrouper((self, ), tags=tags))
    html.short_description = _('rendu HTML')
    html.allow_tags = True

    def clean(self):
        if self.oeuvre is not None and self.profession is None:
            raise ValidationError(
                {'profession': ugettext('This field is required.')})
        if self.individu_id is not None and self.ensemble_id is not None:
            msg = ugettext('« Individu » et « Ensemble » '
                           'ne peuvent être saisis sur la même ligne.')
            raise ValidationError({'individu': msg, 'ensemble': msg})
        if self.individu is not None and self.profession is not None:
            try:
                self.individu.professions.add(self.profession)
            except (Individu.DoesNotExist, Profession.DoesNotExist):
                pass

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.individu == other.individu
                and self.profession == other.profession)

    def __str__(self):
        return self.html(tags=False)

    @permalink
    def get_absolute_url(self):
        return 'individu_detail', (self.individu_id, )
class WineDrinker(Versionable):
    name = CharField(max_length=200)
    glass_content = ForeignKey(Wine, related_name='drinkers', null=True)

    __str__ = versionable_description
def __init__(self, **kwargs):
    kwargs.setdefault('to', User)
    kwargs.setdefault('null', True)
    kwargs.setdefault('blank', True)
    ForeignKey.__init__(self, **kwargs)
class AppRating(TranslatableModel):
    app = ForeignKey('App', related_name='ratings', verbose_name=_('App'),
                     on_delete=CASCADE)
    user = ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('User'),
                      on_delete=CASCADE, related_name='app_ratings')
    rating = FloatField(verbose_name=_('Rating'), default=0.5,
                        help_text=_('Rating from 0.0 (worst) to 1.0 (best)'))
    rated_at = DateTimeField(auto_now=True, db_index=True)
    translations = TranslatedFields(
        comment=TextField(verbose_name=_('Rating comment'), default='',
                          help_text=_('Rating comment in Markdown'),
                          blank=True))

    class Meta:
        unique_together = (('app', 'user'), )
        verbose_name = _('App rating')
        verbose_name_plural = _('App ratings')
        ordering = ('-rated_at', )

    def __str__(self) -> str:
        return str(self.rating)

    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        # update rating on the app
        app = self.app
        day_range = settings.RATING_RECENT_DAY_RANGE
        threshold = settings.RATING_THRESHOLD
        rating, num = self._compute_app_rating(day_range, threshold)
        app.rating_recent = rating
        app.rating_num_recent = num
        rating, num = self._compute_app_rating(threshold=threshold)
        app.rating_overall = rating
        app.rating_num_overall = num
        app.save()

    def _compute_app_rating(self, days: int = -1,
                            threshold: int = 5) -> Tuple[float, int]:
        """Computes an app rating from the app's submitted ratings.

        :param days: passing 30 will only consider ratings from the last 30
            days, pass a negative number to include all ratings
        :param threshold: if the amount of ratings is lower than this number
            return 0.5
        :return: the app rating
        """
        app_ratings = AppRating.objects.filter(app=self.app)
        if days >= 0:
            range = timezone.now() - datetime.timedelta(days=days)
            app_ratings = app_ratings.filter(rated_at__gte=range)
        ratings = map(lambda r: r.rating, app_ratings)
        return compute_rating(list(ratings), threshold)
class Service(Model):
    user = ForeignKey(User, verbose_name='User id', on_delete=CASCADE)
    car_model = CharField(verbose_name='Car model', max_length=100)
    description = TextField(verbose_name='Description')
    cost = FloatField(verbose_name='Cost')
    datetime = DateTimeField(verbose_name='Datetime', auto_now=True)
class AppRelease(TranslatableModel): version = CharField(max_length=256, verbose_name=_('Version'), help_text=_('Version follows Semantic Versioning')) app = ForeignKey('App', on_delete=CASCADE, verbose_name=_('App'), related_name='releases') # dependencies php_extensions = ManyToManyField( 'PhpExtension', blank=True, through='PhpExtensionDependency', verbose_name=_('PHP extension dependency')) databases = ManyToManyField('Database', blank=True, through='DatabaseDependency', verbose_name=_('Database dependency')) licenses = ManyToManyField('License', verbose_name=_('License')) shell_commands = ManyToManyField( 'ShellCommand', blank=True, verbose_name=_('Shell command dependency')) php_version_spec = CharField(max_length=256, verbose_name=_('PHP version requirement')) platform_version_spec = CharField( max_length=256, verbose_name=_('Platform version requirement')) raw_php_version_spec = CharField( max_length=256, verbose_name=_('PHP version requirement (raw)')) raw_platform_version_spec = CharField( max_length=256, verbose_name=_('Platform version requirement (raw)')) min_int_size = IntegerField(blank=True, default=32, verbose_name=_('Minimum Integer bits'), help_text=_('e.g. 32 for 32bit Integers')) download = URLField(max_length=256, blank=True, verbose_name=_('Archive download URL')) created = DateTimeField(auto_now_add=True, editable=False, verbose_name=_('Created at')) last_modified = DateTimeField(auto_now=True, editable=False, db_index=True, verbose_name=_('Updated at')) signature = TextField( verbose_name=_('Signature'), help_text=_('A signature using the app\'s certificate')) signature_digest = CharField(max_length=256, verbose_name=_('Signature hashing algorithm')) translations = TranslatedFields(changelog=TextField( verbose_name=_('Changelog'), help_text=_('The release changelog. Can contain Markdown'), default='')) is_nightly = BooleanField(verbose_name=_('Nightly'), default=False) class Meta: verbose_name = _('App release') verbose_name_plural = _('App releases') unique_together = (('app', 'version', 'is_nightly'), ) ordering = ['-version'] def can_update(self, user: User) -> bool: return self.app.owner == user or user in self.app.co_maintainers.all() def can_delete(self, user: User) -> bool: return self.can_update(user) def __str__(self) -> str: return '%s %s' % (self.app, self.version) def is_compatible(self, platform_version, inclusive=False): """Checks if a release is compatible with a platform version :param platform_version: the platform version, not required to be semver compatible :param inclusive: if True the check will also return True if an app requires 9.0.1 and the given platform version is 9.0 :return: True if compatible, otherwise false """ min_version = Version(pad_min_version(platform_version)) spec = Spec(self.platform_version_spec) if inclusive: max_version = Version(pad_max_inc_version(platform_version)) return (min_version in spec or max_version in spec) else: return min_version in spec @property def is_unstable(self): return self.is_nightly or '-' in self.version
class HomePage(Page): subpage_types = ['content.ContentPage'] template = 'home.html' # Content fields subtitle = TextField(max_length=250, blank=True, default='') button_text = CharField(max_length=30, blank=True, default='') button_url = CharField(max_length=2048, blank=True, default='') image = ForeignKey( 'mozimages.MozImage', null=True, blank=True, on_delete=SET_NULL, related_name='+' ) featured = StreamField( StreamBlock([ ('article', PageChooserBlock(required=False, target_model=( 'articles.Article', 'externalcontent.ExternalArticle', ))), ('external_page', FeaturedExternalBlock()), ], min_num=0, max_num=4), null=True, blank=True, ) about_title = TextField(max_length=250, blank=True, default='') about_subtitle = TextField(max_length=250, blank=True, default='') about_button_text = CharField(max_length=30, blank=True, default='') about_button_url = URLField(max_length=140, blank=True, default='') # Card fields card_title = CharField('Title', max_length=140, blank=True, default='') card_description = TextField('Description', max_length=140, blank=True, default='') card_image = ForeignKey( 'mozimages.MozImage', null=True, blank=True, on_delete=SET_NULL, related_name='+', verbose_name='Image', ) # Meta fields keywords = ClusterTaggableManager(through=HomePageTag, blank=True) # Editor panel configuration content_panels = Page.content_panels + [ MultiFieldPanel( [ FieldPanel('subtitle'), FieldPanel('button_text'), FieldPanel('button_url'), ], heading="Header section", ), ImageChooserPanel('image'), StreamFieldPanel('featured'), MultiFieldPanel( [ FieldPanel('about_title'), FieldPanel('about_subtitle'), FieldPanel('about_button_text'), FieldPanel('about_button_url'), ], heading="About section", ) ] # Card panels card_panels = [ FieldPanel('card_title'), FieldPanel('card_description'), ImageChooserPanel('card_image'), ] # Meta panels meta_panels = [ MultiFieldPanel([ FieldPanel('seo_title'), FieldPanel('search_description'), FieldPanel('keywords'), ], heading='SEO'), ] # Settings panels settings_panels = [ FieldPanel('slug'), ] # Tabs edit_handler = TabbedInterface([ ObjectList(content_panels, heading='Content'), ObjectList(card_panels, heading='Card'), ObjectList(meta_panels, heading='Meta'), ObjectList(settings_panels, heading='Settings', classname='settings'), ]) @property def primary_topics(self): """The site’s top-level topics, i.e. topics without a parent topic.""" from ..topics.models import Topic return Topic.objects.filter(parent_topics__isnull=True).live().public().order_by('title')
class App(TranslatableModel): objects = AppManager() id = CharField(max_length=256, unique=True, primary_key=True, verbose_name=_('ID'), help_text=_('app ID, identical to folder name')) categories = ManyToManyField('Category', verbose_name=_('Category')) translations = TranslatedFields( name=CharField(max_length=256, verbose_name=_('Name'), help_text=_('Rendered app name for users')), summary=CharField( max_length=256, verbose_name=_('Summary'), help_text=_('Short text describing the app\'s purpose')), description=TextField(verbose_name=_('Description'), help_text=_('Will be rendered as Markdown'))) # resources user_docs = URLField(max_length=256, blank=True, verbose_name=_('User documentation URL')) admin_docs = URLField(max_length=256, blank=True, verbose_name=_('Admin documentation URL')) developer_docs = URLField(max_length=256, blank=True, verbose_name=_('Developer documentation URL')) issue_tracker = URLField(max_length=256, blank=True, verbose_name=_('Issue tracker URL')) website = URLField(max_length=256, blank=True, verbose_name=_('Homepage')) discussion = URLField(max_length=256, blank=True, verbose_name=_('Forum')) created = DateTimeField(auto_now_add=True, editable=False, verbose_name=_('Created at')) last_modified = DateTimeField(auto_now=True, editable=False, db_index=True, verbose_name=_('Updated at')) owner = ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('App owner'), on_delete=CASCADE, related_name='owned_apps') co_maintainers = ManyToManyField(settings.AUTH_USER_MODEL, blank=True, verbose_name=_('Co-Maintainers'), related_name='co_maintained_apps') authors = ManyToManyField('AppAuthor', blank=True, related_name='apps', verbose_name=_('App authors')) is_featured = BooleanField(verbose_name=_('Featured'), default=False) rating_recent = FloatField(verbose_name=_('Recent rating'), default=0.5) rating_overall = FloatField(verbose_name=_('Overall rating'), default=0.5) rating_num_recent = IntegerField( verbose_name=_('Number of recently submitted ratings'), default=0) rating_num_overall = IntegerField( verbose_name=_('Number of overall submitted ratings'), default=0) last_release = DateTimeField(editable=False, db_index=True, verbose_name=_('Last release at'), default=timezone.now) certificate = TextField(verbose_name=_('Certificate')) ownership_transfer_enabled = BooleanField( verbose_name=_('Ownership transfer enabled'), default=False, help_text=_('If enabled, a user can try to register the same app ' 'again using the public certificate and signature. 
If he ' 'does, the app will be transferred to him.')) class Meta: verbose_name = _('App') verbose_name_plural = _('Apps') def __str__(self) -> str: return self.name def can_update(self, user: User) -> bool: return self.owner == user or user in self.co_maintainers.all() def can_delete(self, user: User) -> bool: return self.owner == user @property def discussion_url(self): if self.discussion: return self.discussion else: return '%s/c/apps/%s' % (settings.DISCOURSE_URL, self.id.replace('_', '-')) def _get_grouped_releases(self, get_release_func): releases = NextcloudRelease.objects.all() versions = map(lambda r: r.version, releases) compatible_releases = map(lambda v: (v, get_release_func(v)), versions) grouped_releases = group_by_main_version(dict(compatible_releases)) # deduplicate releases result = {} for version, releases in grouped_releases.items(): result[version] = list(distinct(releases, lambda r: r.version)) return result def releases_by_platform_v(self): """Looks up all compatible stable releases for each platform version. Example of returned dict: {'9.1': [<AppRelease object>, <AppRelease object>], '9.0': [<AppRelease object>]} :return dict with all compatible stable releases for each platform version. """ return self._get_grouped_releases(self.compatible_releases) def unstable_releases_by_platform_v(self): """Looks up all compatible unstable releases for each platform version. Example of returned dict: {'9.1': [<AppRelease object>, <AppRelease object>], '9.0': [<AppRelease object>]} :return dict with all compatible unstable releases for each platform version. """ return self._get_grouped_releases(self.compatible_unstable_releases) def latest_releases_by_platform_v(self): """Looks up the latest stable and unstable release for each platform version. Example of returned dict: {'9.1': { 'stable': <AppRelease object>, 'unstable': <AppRelease object> }, '9.0': { 'stable': <AppRelease object> }} :return dict with the latest stable and unstable release for each platform version. """ stable = self.releases_by_platform_v() unstable = self.unstable_releases_by_platform_v() def filter_latest(pair): version, releases = pair return (version, self._latest(releases)) latest_stable = dict(map(filter_latest, stable.items())) latest_unstable = dict(map(filter_latest, unstable.items())) all_versions = set(chain(latest_stable.keys(), latest_unstable.keys())) def stable_or_unstable_releases(ver): return (ver, { 'stable': latest_stable.get(ver, None), 'unstable': latest_unstable.get(ver, None) }) return dict(map(stable_or_unstable_releases, all_versions)) def compatible_releases(self, platform_version, inclusive=True): """Returns all stable releases of this app that are compatible with the given platform version. :param inclusive: Use inclusive version check (see AppRelease.is_compatible()). :return a sorted list of all compatible stable releases. """ return sorted( filter( lambda r: r.is_compatible(platform_version, inclusive) and not r.is_unstable, self.releases.all()), key=lambda r: AppSemVer(r.version, r.is_nightly, r.last_modified), reverse=True) def compatible_unstable_releases(self, platform_version, inclusive=True): """Returns all unstable releases of this app that are compatible with the given platform version. :param inclusive: Use inclusive version check (see AppRelease.is_compatible()). :return a sorted list of all compatible unstable releases. """ return sorted( filter( lambda r: r.is_compatible(platform_version, inclusive) and r. 
is_unstable, self.releases.all()), key=lambda r: AppSemVer(r.version, r.is_nightly, r.last_modified), reverse=True) def _latest(self, releases): try: return max(releases, key=lambda r: AppSemVer(r.version, r.is_nightly, r. last_modified)) except ValueError: return None def save(self, *args, **kwargs): # If the certificate has changed, delete all releases. try: if self.pk is not None: orig = App.objects.get(pk=self.pk) current = self.certificate.replace('\r', '').strip() former = orig.certificate.replace('\r', '').strip() # for some reason the django admin inserts \r\n for \n so # saving a model in the admin with the same cert kills all # releases if current != former: self.releases.all().delete() except self.DoesNotExist: pass super().save(*args, **kwargs)
class FilesystemGroup(CleanSave, TimestampedModel): """A filesystem group. Contains a set of filesystems that create a virtual block device. E.g. LVM Volume Group. :ivar uuid: UUID of the filesystem group. :ivar group_type: Type of filesystem group. :ivar name: Name of the filesytem group. :ivar create_params: Parameters that can be passed during the create command when the filesystem group is created. """ class Meta(DefaultMeta): """Needed for South to recognize this model.""" objects = FilesystemGroupManager() uuid = CharField(max_length=36, unique=True, null=False, blank=False, editable=False) group_type = CharField(max_length=20, null=False, blank=False, choices=FILESYSTEM_GROUP_TYPE_CHOICES) name = CharField(max_length=255, null=False, blank=False) create_params = CharField(max_length=255, null=True, blank=True) cache_mode = CharField(max_length=20, null=True, blank=True, choices=CACHE_MODE_TYPE_CHOICES) cache_set = ForeignKey(CacheSet, null=True, blank=True, on_delete=CASCADE) def __str__(self): return '%s device %s %d' % (self.group_type, self.name, self.id) @property def virtual_device(self): """Return the linked `VirtualBlockDevice`. This should never be called when the group_type is LVM_VG. `virtual_devices` should be used in that case, since LVM_VG supports multiple `VirtualBlockDevice`s. """ if self.is_lvm(): raise AttributeError("virtual_device should not be called when " "group_type = LVM_VG.") else: # Return the first `VirtualBlockDevice` since it is the only one. # Using 'all()' instead of 'first()' so that if it was precached # that cache will be used. return get_one(self.virtual_devices.all()) def get_node(self): """`Node` this filesystem group belongs to.""" if self.filesystems.count() == 0: return None return self.filesystems.first().get_node() def get_size(self): """Size of this filesystem group. Calculated from the total size of all filesystems in this group. Its not calculated from its virtual_block_device size. The linked `VirtualBlockDevice` should calculate its size from this filesystem group. """ if self.is_lvm(): return self.get_lvm_size() elif self.is_raid(): return self.get_raid_size() elif self.is_bcache(): return self.get_bcache_size() else: return 0 def get_lvm_size(self): """Size of this LVM volume group. Calculated from the total size of all filesystems in this group. Its not calculated from its virtual_block_device size. Note: Should only be called when the `group_type` is LVM_VG. """ filesystems = list(self.filesystems.all()) if len(filesystems) == 0: return 0 else: pv_total_size = sum(filesystem.get_size() for filesystem in filesystems) number_of_extents, _ = divmod(pv_total_size, LVM_PE_SIZE) # Reserve one extent per filesystem for LVM headers - lp:1517129. return (number_of_extents - len(filesystems)) * LVM_PE_SIZE def get_smallest_filesystem_size(self): """Return the smallest filesystem size.""" filesystems = list(self.filesystems.all()) if len(filesystems) == 0: return 0 else: return min(filesystem.get_size() for filesystem in filesystems) def get_raid_size(self): """Size of this RAID. Calculated based on the RAID type and how output size based on that type. The size will be calculated using the smallest size filesystem attached to this RAID. The linked `VirtualBlockDevice` should calculate its size from this filesystem group. Note: Should only be called when the `group_type` is in `FILESYSTEM_GROUP_RAID_TYPES`. """ min_size = self.get_smallest_filesystem_size() if min_size <= 0: # Possible when no filesytems are attached to this group. 
return 0 elif self.group_type == FILESYSTEM_GROUP_TYPE.RAID_0: return min_size * self.filesystems.count() elif self.group_type == FILESYSTEM_GROUP_TYPE.RAID_1: return min_size else: num_raid = len([ fstype for fstype in self._get_all_fstypes() if fstype == FILESYSTEM_TYPE.RAID ]) if self.group_type == FILESYSTEM_GROUP_TYPE.RAID_5: return min_size * (num_raid - 1) elif self.group_type == FILESYSTEM_GROUP_TYPE.RAID_6: return min_size * (num_raid - 2) elif self.group_type == FILESYSTEM_GROUP_TYPE.RAID_10: return min_size * num_raid / 2 raise ValidationError("Unknown raid type: %s" % self.group_type) def get_bcache_backing_filesystem(self): """Return the filesystem that is the backing device for the Bcache.""" for filesystem in self.filesystems.all(): if filesystem.fstype == FILESYSTEM_TYPE.BCACHE_BACKING: return filesystem return None def get_bcache_backing_block_device(self): """Return the block_device that is the backing device for the Bcache. This will return the block device even if the backing is a partition. """ filesystem = self.get_bcache_backing_filesystem() if filesystem is not None: if filesystem.block_device is not None: return filesystem.block_device else: return filesystem.partition.partition_table.block_device return None def get_bcache_size(self): """Size of this Bcache. Calculated based on the size of the backing device. The linked `VirtualBlockDevice` should calculate its size from this filesystem group. Note: Should only be called when the `group_type` is BCACHE. """ backing_filesystem = self.get_bcache_backing_filesystem() if backing_filesystem is None: return 0 else: return backing_filesystem.get_size() def get_lvm_allocated_size(self, skip_volumes=[]): """Returns the space already allocated to virtual block devices. Calculated from the total size of all virtual block devices in this group. """ return sum(logical_volume.size for logical_volume in self.virtual_devices.all() if logical_volume not in skip_volumes) def get_lvm_free_space(self, skip_volumes=[]): """Returns the total unallocated space on this FilesystemGroup""" return self.get_lvm_size() - self.get_lvm_allocated_size( skip_volumes=skip_volumes) def clean(self, *args, **kwargs): super(FilesystemGroup, self).clean(*args, **kwargs) # We allow the initial save to skip model validation, any # additional saves required filesystems linked. This is because the # object needs to exist in the database before the filesystems can # be linked. if not self.id: return # Grab all filesystems so that if the filesystems have been precached # it will be used. This prevents extra database queries. filesystems = list(self.filesystems.all()) # Must at least have a filesystem added to the group. if len(filesystems) == 0: raise ValidationError( "At least one filesystem must have been added.") # All filesystems must belong all to the same node. nodes = {filesystem.get_node() for filesystem in filesystems} if len(nodes) > 1: raise ValidationError( "All added filesystems must belong to the same node.") # Validate all the different group types. 
if self.is_lvm(): self._validate_lvm(filesystems=filesystems) elif self.is_raid(): self._validate_raid(filesystems=filesystems) elif self.is_bcache(): self._validate_bcache(filesystems=filesystems) def is_lvm(self): """Return True if `group_type` is LVM_VG type.""" return self.group_type == FILESYSTEM_GROUP_TYPE.LVM_VG def is_raid(self): """Return True if `group_type` is a RAID type.""" return self.group_type in FILESYSTEM_GROUP_RAID_TYPES def is_bcache(self): """Return True if `group_type` is BCACHE type.""" return self.group_type == FILESYSTEM_GROUP_TYPE.BCACHE def _get_all_fstypes(self, filesystems=None): """Return list of all filesystem types attached.""" # Grab all filesystems so that if the filesystems have been # precached it will be used. This prevents extra database queries. if filesystems is None: filesystems = list(self.filesystems.all()) return [filesystem.fstype for filesystem in filesystems] def _validate_lvm(self, filesystems=None): """Validate attached filesystems are correct type for LVM_VG. """ if not self.is_lvm(): return unique_fstypes = set(self._get_all_fstypes(filesystems=filesystems)) if unique_fstypes != set([FILESYSTEM_TYPE.LVM_PV]): raise ValidationError( "Volume group can only contain lvm physical volumes.") def _validate_raid(self, filesystems=None): """Validate attached filesystems are correct count and type for RAID. """ if not self.is_raid(): return fstypes = self._get_all_fstypes(filesystems=filesystems) num_raid = len( [fstype for fstype in fstypes if fstype == FILESYSTEM_TYPE.RAID]) num_raid_spare = len([ fstype for fstype in fstypes if fstype == FILESYSTEM_TYPE.RAID_SPARE ]) if self.group_type == FILESYSTEM_GROUP_TYPE.RAID_0: # RAID 0 can contain 2 or more RAID filesystems and no spares are # allowed. if num_raid < 2 or num_raid_spare != 0: raise ValidationError( "RAID level 0 must have at least 2 raid devices and " "no spares.") elif self.group_type == FILESYSTEM_GROUP_TYPE.RAID_1: # RAID 1 must have at least 2 RAID filesystems. if num_raid < 2: raise ValidationError( "RAID level 1 must have at least 2 raid devices and " "any number of spares.") elif self.group_type == FILESYSTEM_GROUP_TYPE.RAID_5: # RAID 5 must have at least 3 RAID filesystems, but can have # spares. if num_raid < 3: raise ValidationError( "RAID level 5 must have at least 3 raid devices and " "any number of spares.") elif self.group_type == FILESYSTEM_GROUP_TYPE.RAID_6: # RAID 6 must have at least 4 RAID filesystems, but can have # spares. if num_raid < 4: raise ValidationError( "RAID level 6 must have at least 4 raid devices and " "any number of spares.") elif self.group_type == FILESYSTEM_GROUP_TYPE.RAID_10: # RAID 10 must have at least 4 RAID filesystems, but can have # spares. if num_raid < 3: raise ValidationError( "RAID level 10 must have at least 3 raid devices and " "any number of spares.") num_raid_invalid = len([ fstype for fstype in fstypes if (fstype != FILESYSTEM_TYPE.RAID and fstype != FILESYSTEM_TYPE.RAID_SPARE) ]) if num_raid_invalid > 0: raise ValidationError( "RAID can only contain raid device and raid spares.") def _validate_bcache(self, filesystems=None): """Validate attached filesystems are correct type for BCACHE. """ if not self.is_bcache(): return # Circular imports. 
from maasserver.models.virtualblockdevice import VirtualBlockDevice filesystems = [ filesystem.fstype for filesystem in self.filesystems.all() ] if filesystems != [FILESYSTEM_TYPE.BCACHE_BACKING]: raise ValidationError( "Bcache can only contain one backing device.") backing_block_device = self.get_bcache_backing_block_device() backing_block_device = backing_block_device.actual_instance if isinstance(backing_block_device, VirtualBlockDevice): if backing_block_device.filesystem_group.is_lvm(): raise ValidationError( "Bcache cannot use a logical volume as a backing device.") if self.cache_mode is None: raise ValidationError("Bcache requires cache mode to be set.") if self.cache_set is None: raise ValidationError("Bcache requires an assigned cache set.") def save(self, *args, **kwargs): # Prevent the group_type from changing. This is not supported and will # break the linked filesystems and the created virtual block device(s). if self.pk is not None and self._state.has_changed('group_type'): orig_type = self._state.get_old_value('group_type') if orig_type != self.group_type: raise ValidationError( "Cannot change the group_type of a FilesystemGroup.") # Prevent saving if the size of the volume group is now smaller than # the total size of logical volumes. if (self.group_type == FILESYSTEM_GROUP_TYPE.LVM_VG and self.get_lvm_free_space() < 0): raise ValidationError( "Volume group cannot be smaller than its logical volumes.") # Set the name correctly based on the type and generate a new UUID # if needed. if self.group_type is not None and self.name is None: self.name = FilesystemGroup.objects.get_available_name_for(self) if not self.uuid: self.uuid = uuid4() super(FilesystemGroup, self).save(*args, **kwargs) # Update or create the virtual block device when the filesystem group # is saved. Does nothing if group_type is LVM_VG. Virtual block device # is not created until filesystems are linked because the filesystems # contain the node that this filesystem group belongs to. if self.filesystems.count() > 0: from maasserver.models.virtualblockdevice import VirtualBlockDevice VirtualBlockDevice.objects.create_or_update_for(self) def delete(self, force=False): """Delete from the database. :param force: Delete any related object that prevents this object from being deleted. """ if self.is_lvm(): if self.virtual_devices.count() > 0: if force: # Delete the linked virtual block devices, since the # deletion of this object is forced. self.virtual_devices.all().delete() else: # Don't allow the filesystem group to be deleted if virtual # block devices are linked. You cannot delete a volume # group that has logical volumes. raise ValidationError( "This volume group has logical volumes; it cannot be " "deleted.") else: # For the other types we delete the virtual block device. virtual_device = self.virtual_device if virtual_device is not None: self.virtual_device.delete() # Possible that the virtual block device has already deleted the # filesystem group. Skip the call if no id is set. if self.id is not None: super(FilesystemGroup, self).delete() def get_nice_name(self): """Return the nice name for the filesystem group. This is used when showing error or log messages. 
""" if self.is_lvm(): return "volume group" elif self.is_raid(): return "RAID" elif self.is_bcache(): return "Bcache" else: raise ValueError("Unknown group_type.") def get_name_prefix(self): """Return the prefix that should be used when setting the name of this FilesystemGroup.""" if self.is_lvm(): return "vg" elif self.is_raid(): return "md" elif self.is_bcache(): return "bcache" else: raise ValidationError("Unknown group_type.") def get_virtual_block_device_block_size(self): """Return the block size that should be used on a created `VirtualBlockDevice` for this filesystem group.""" if self.is_lvm(): # Default for logical volume in LVM is 4096. return 4096 elif self.is_raid(): # mdadm by default creates raid devices with 512 block size. return 512 elif self.is_bcache(): # Bcache uses the block_size of the backing device. return self.get_bcache_backing_filesystem().get_block_size() else: raise ValidationError("Unknown group_type.")
class SQLQuery(models.Model):
    query = TextField()
    start_time = DateTimeField(null=True, blank=True, default=timezone.now)
    end_time = DateTimeField(null=True, blank=True)
    time_taken = FloatField(blank=True, null=True)
    request = ForeignKey('Request', related_name='queries', null=True,
                         blank=True, db_index=True)
    traceback = TextField()
    objects = SQLQueryManager()

    @property
    def traceback_ln_only(self):
        return '\n'.join(self.traceback.split('\n')[::2])

    @property
    def formatted_query(self):
        return sqlparse.format(self.query, reindent=True, keyword_case='upper')

    # TODO: Surely a better way to handle this? May return false positives
    @property
    def num_joins(self):
        return self.query.lower().count('join ')

    @property
    def tables_involved(self):
        """A really rather rudimentary way to work out tables involved in a
        query.
        TODO: Can probably parse the SQL using sqlparse etc and pull out
        table info that way?"""
        components = [x.strip() for x in self.query.split()]
        tables = []
        for idx, c in enumerate(components):
            # TODO: If django uses aliases on column names they will be
            # falsely identified as tables...
            if c.lower() == 'from' or c.lower() == 'join' or c.lower() == 'as':
                try:
                    nxt = components[idx + 1]
                    if not nxt.startswith('('):  # Subquery
                        stripped = nxt.strip().strip(',')
                        if stripped:
                            tables.append(stripped)
                except IndexError:  # Reached the end
                    pass
        return tables

    @transaction.atomic()
    def save(self, *args, **kwargs):
        if self.end_time and self.start_time:
            interval = self.end_time - self.start_time
            self.time_taken = interval.total_seconds() * 1000
        if not self.pk:
            if self.request:
                self.request.num_sql_queries += 1
                self.request.save()
        super(SQLQuery, self).save(*args, **kwargs)

    @transaction.atomic()
    def delete(self, *args, **kwargs):
        self.request.num_sql_queries -= 1
        self.request.save()
        super(SQLQuery, self).delete(*args, **kwargs)
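# Illustrative trace (not from the original source): for a hypothetical query
#
#     SELECT u.id AS uid FROM auth_user u JOIN app_profile p ON p.user_id = u.id
#
# tables_involved splits on whitespace and records the token following each
# FROM/JOIN/AS, so it would return ['uid', 'auth_user', 'app_profile'] --
# 'uid' being exactly the alias false positive the TODO comments warn about.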
class Task(AbstractEntity):
    """Defines the workflow task"""
    request = ForeignKey(Request, related_name='tasks')
    assignee = ForeignKey(Group)
    updated_by = ForeignKey(User)
    activity_ref = CharField(max_length=100)
    status = CharField(verbose_name="Status", max_length=30,
                       choices=TASK_STATUS)

    @property
    def activity(self):
        """Returns the activity associated with the task"""
        flow = flow_config(self.request.module_ref).FLOW
        return getattr(
            self, flow[self.activity_ref]['model']().title.lower(), None)

    @property
    def is_active(self):
        """Checks if the current task is active / most recent"""
        return self == self.request.tasks.latest('id')

    @property
    def is_final(self):
        """Checks if the current task is final / end task"""
        transitions = transition_config(
            self.request.module_ref, self.activity_ref)
        return not transitions

    @property
    def previous(self):
        """Returns previous task"""
        return Task.objects.filter(
            request=self.request, id__lt=self.id).latest('id')

    @property
    def can_view_activity(self):
        """Checks if activity can be viewed"""
        return self.activity

    @property
    def can_initiate_activity(self):
        """Checks if new activity can be initiated"""
        return not self.activity

    @property
    def can_revise_activity(self):
        """Checks if activity can be revised"""
        return all([self.activity, self.is_active])

    @property
    def can_rollback(self):
        """Checks if activity can be rolled back"""
        return not any([self.activity.is_initial, self.status == 'Completed'])

    def initiate(self):
        """Initializes the task"""
        self.status = 'In Progress'
        self.save()

    def submit(self, module, user, next_activity=None):
        """Submits the task"""
        config = flow_config(module)
        transitions = transition_config(module, self.activity_ref)
        role = Group.objects.get(name=config.FLOW[next_activity]['role'])

        self.status = 'Completed'
        self.save()

        if transitions is not None:
            Task.objects.create(
                request=self.request, assignee=role, updated_by=user,
                activity_ref=next_activity, status='Not Started')
        else:
            self.request.status = 'Completed'
            self.request.save()

    def rollback(self):
        """Rollback to previous task"""
        previous = self.previous
        previous.status = 'Rolled Back'
        previous.save()
        self.status = 'Rolled Back'
        self.save()

        # Clone Task
        task = self.previous
        task.id = None
        task.status = 'Not Started'
        task.save()

        # Clone Activity
        activity = self.previous.activity
        activity.id = None
        activity.task = task
        activity.save()
class Request(FeedbackTrait):
    requested_by = ForeignKey('yunity.User', null=True)
    time = DateTimeField(null=True)
    status = MaxLengthCharField()
class Person(BasePage): resource_type = "person" parent_page_types = ["People"] subpage_types = [] template = "person.html" # Content fields nickname = CharField(max_length=250, null=True, blank=True) job_title = CharField(max_length=250) role = CharField(max_length=250, choices=ROLE_CHOICES, default="staff") description = RichTextField( "About", blank=True, default="", features=RICH_TEXT_FEATURES_SIMPLE, help_text="Optional ‘About me’ section content, supports rich text", ) # Card fields card_title = CharField("Title", max_length=140, blank=True, default="") card_description = TextField("Description", max_length=DESCRIPTION_MAX_LENGTH, blank=True, default="") card_image = ForeignKey( "mozimages.MozImage", null=True, blank=True, on_delete=SET_NULL, related_name="+", verbose_name="Image", help_text="An image in 16:9 aspect ratio", ) # Meta city = CharField(max_length=250, blank=True, default="") country = CountryField(verbose_name="Country or Region", blank=True, default="") twitter = CharField(max_length=250, blank=True, default="") facebook = CharField(max_length=250, blank=True, default="") linkedin = CharField(max_length=250, blank=True, default="") github = CharField(max_length=250, blank=True, default="") email = CharField(max_length=250, blank=True, default="") websites = StreamField( StreamBlock([("website", PersonalWebsiteBlock())], max_num=3, required=False), null=True, blank=True, help_text="Optional links to any other personal websites", ) keywords = ClusterTaggableManager(through=PersonTag, blank=True) # Content panels content_panels = [ MultiFieldPanel( [ CustomLabelFieldPanel("title", label="Full name"), FieldPanel("nickname"), FieldPanel("job_title"), FieldPanel("role"), ], heading="Details", ), FieldPanel("description"), ] # Card panels card_panels = [ FieldPanel("card_title"), FieldPanel("card_description"), MultiFieldPanel( [ImageChooserPanel("card_image")], heading="16:9 Image", help_text=( "Image used for representing this page as a Card. " "Should be 16:9 aspect ratio. " "If not specified a fallback will be used. " "This image is also shown when sharing this page via social " "media unless a social image is specified."), ), ] # Meta panels meta_panels = [ MultiFieldPanel( [FieldPanel("city"), FieldPanel("country")], heading="Location", help_text=("Location fields. 
The country field is also filterable " "via the people directory page."), ), MultiFieldPanel([InlinePanel("topics")], heading="Topics this person specializes in"), MultiFieldPanel( [ FieldPanel("twitter"), FieldPanel("facebook"), FieldPanel("linkedin"), FieldPanel("github"), FieldPanel("email"), ], heading="Profiles", help_text="", ), StreamFieldPanel("websites"), MultiFieldPanel( [ FieldPanel("seo_title"), FieldPanel("search_description"), ImageChooserPanel("social_image"), FieldPanel("keywords"), ], heading="SEO", help_text=( "Optional fields to override the default title and description " "for SEO purposes"), ), ] # Settings panels settings_panels = BasePage.settings_panels + [FieldPanel("slug")] # Tabs edit_handler = TabbedInterface([ ObjectList(content_panels, heading="Content"), ObjectList(card_panels, heading="Card"), ObjectList(meta_panels, heading="Meta"), ObjectList(settings_panels, heading="Settings", classname="settings"), ]) # Search config search_fields = BasePage.search_fields + [ # Inherit search_fields from Page # "title" is already specced in BasePage index.SearchField("description"), # Add FilterFields for things we may be filtering on (eg topics) index.FilterField("slug"), ] @property def display_title(self): """ Return the display title for profile pages. Adds a nickname to the person's full name when one is provided. """ return f'{self.title} aka "{self.nickname}"' if self.nickname else self.title @property def events(self): """ Return upcoming events where this person is a speaker, ordered by start date """ from ..events.models import Event upcoming_events = Event.published_objects.filter( start_date__gte=get_past_event_cutoff()) speaker_events = Event.published_objects.none() for event in upcoming_events.all(): # add the event to the list if the current person is a speaker if event.has_speaker(self): speaker_events = speaker_events | Event.published_objects.page( event) return speaker_events.order_by("start_date") @property def articles(self): """ Return articles and external articles where this person is (one of) the authors, ordered by article date, most recent first """ from ..articles.models import Article from ..externalcontent.models import ExternalArticle articles = Article.published_objects.none() external_articles = ExternalArticle.published_objects.none() all_articles = Article.published_objects.all() all_external_articles = ExternalArticle.published_objects.all() for article in all_articles: if article.has_author(self): articles = articles | Article.published_objects.page(article) for external_article in all_external_articles: if external_article.has_author(self): external_articles = external_articles | ( ExternalArticle.published_objects.page(external_article)) return sorted(chain(articles, external_articles), key=attrgetter("date"), reverse=True) @property def videos(self): """ Return the most recent videos and external videos where this person is (one of) the speakers. 
""" from ..videos.models import Video from ..externalcontent.models import ExternalVideo videos = Video.published_objects.none() external_videos = ExternalVideo.published_objects.none() all_videos = Video.published_objects.all() all_external_videos = ExternalVideo.published_objects.all() for video in all_videos: if video.has_speaker(self): videos = videos | Video.published_objects.page(video) for external_video in all_external_videos: if external_video.has_speaker(self): external_videos = external_videos | ( ExternalVideo.published_objects.page(external_video)) return sorted(chain(videos, external_videos), key=attrgetter("date"), reverse=True) @property def role_group(self): return { "slug": self.role, "title": dict(ROLE_CHOICES).get(self.role, "") } @property def country_group(self): return ({ "slug": self.country.code.lower(), "title": self.country.name } if self.country else { "slug": "" }) def get_topics(self) -> List: """Return the live/published Topic pages associated with this Person""" # Note that we do this in Python because django-modelcluster won't support # `filter(topic__live=True)` when _previewing_ pages (even tho it'll work # on saved ones) topics = [pt.topic for pt in self.topics.all()] return [t for t in topics if t.live]
class VersionTrait(BaseModel):
    _VersionTrait_to_BaseModel = OneToOneField('yunity.BaseModel', parent_link=True)
    next_version = ForeignKey('self', null=True, related_name='previous_version')
class StaticIPAddress(CleanSave, TimestampedModel): class Meta(DefaultMeta): verbose_name = "Static IP Address" verbose_name_plural = "Static IP Addresses" unique_together = ("alloc_type", "ip") # IP can be none when a DHCP lease has expired: in this case the entry # in the StaticIPAddress only materializes the connection between an # interface and a subnet. ip = MAASIPAddressField( unique=False, null=True, editable=False, blank=True, default=None, verbose_name="IP", ) alloc_type = IntegerField(editable=False, null=False, blank=False, default=IPADDRESS_TYPE.AUTO) # Subnet is only null for IP addresses allocate before the new networking # model. subnet = ForeignKey("Subnet", editable=True, blank=True, null=True, on_delete=CASCADE) user = ForeignKey( User, default=None, blank=True, null=True, editable=False, on_delete=PROTECT, ) # Used only by DISCOVERED address to set the lease_time for an active # lease. Time is in seconds. lease_time = IntegerField(default=0, editable=False, null=False, blank=False) # Used to mark a `StaticIPAddress` as temperary until the assignment # can be confirmed to be free in the subnet. temp_expires_on = DateTimeField(null=True, blank=True, editable=False, db_index=True) objects = StaticIPAddressManager() def __str__(self): # Attempt to show the symbolic alloc_type name if possible. type_names = map_enum_reverse(IPADDRESS_TYPE) strtype = type_names.get(self.alloc_type, "%s" % self.alloc_type) return "%s:type=%s" % (self.ip, strtype) @property def alloc_type_name(self): """Returns a human-readable representation of the `alloc_type`.""" return IPADDRESS_TYPE_CHOICES_DICT.get(self.alloc_type, "") def get_node(self): """Return the Node of the first Interface connected to this IP address.""" interface = self.get_interface() if interface is not None: return interface.get_node() else: return None def get_interface(self): """Return the first Interface connected to this IP address.""" # Note that, while this relationship is modeled as a many-to-many, # MAAS currently only relates a single interface per IP address # at this time. In the future, we may want to model virtual IPs, in # which case this will need to change. interface = self.interface_set.first() return interface def get_interface_link_type(self): """Return the `INTERFACE_LINK_TYPE`.""" if self.alloc_type == IPADDRESS_TYPE.AUTO: return INTERFACE_LINK_TYPE.AUTO elif self.alloc_type == IPADDRESS_TYPE.DHCP: return INTERFACE_LINK_TYPE.DHCP elif self.alloc_type == IPADDRESS_TYPE.USER_RESERVED: return INTERFACE_LINK_TYPE.STATIC elif self.alloc_type == IPADDRESS_TYPE.STICKY: if not self.ip: return INTERFACE_LINK_TYPE.LINK_UP else: return INTERFACE_LINK_TYPE.STATIC else: raise ValueError("Unknown alloc_type.") def get_log_name_for_alloc_type(self): """Return a nice log name for the `alloc_type` of the IP address.""" return IPADDRESS_TYPE_CHOICES_DICT[self.alloc_type] def is_linked_to_one_unknown_interface(self): """Return True if the IP address is only linked to one unknown interface.""" interface_types = [ interface.type for interface in self.interface_set.all() ] return interface_types == [INTERFACE_TYPE.UNKNOWN] def get_related_discovered_ip(self): """Return the related DISCOVERED IP address for this IP address. This comes from looking at the DISCOVERED IP addresses assigned to the related interfaces. 
""" interfaces = list(self.interface_set.all()) discovered_ips = [ ip for ip in StaticIPAddress.objects.filter( interface__in=interfaces, alloc_type=IPADDRESS_TYPE.DISCOVERED, ip__isnull=False, ).order_by("-id") if ip.ip ] if len(discovered_ips) > 0: return discovered_ips[0] else: return None def get_ip(self): """Return the IP address assigned.""" ip, subnet = self.get_ip_and_subnet() return ip def get_ip_and_subnet(self): """Return the IP address and subnet assigned. For all alloc_types except DHCP it returns `ip` and `subnet`. When `alloc_type` is DHCP it returns the associated DISCOVERED `ip` and `subnet` on the same linked interfaces. """ if self.alloc_type == IPADDRESS_TYPE.DHCP: discovered_ip = self.get_related_discovered_ip() if discovered_ip is not None: return discovered_ip.ip, discovered_ip.subnet return self.ip, self.subnet def deallocate(self): """Mark this IP address as no longer in use. After return, this object is no longer valid. """ self.delete() def clean_subnet_and_ip_consistent(self): """Validate that the IP address is inside the subnet.""" # USER_RESERVED addresses must have an IP address specified. # Blank AUTO, STICKY and DHCP addresses have a special meaning: # - Blank AUTO addresses mean the interface will get an IP address # auto assigned when it goes to be deployed. # - Blank STICKY addresses mean the interface should come up and be # associated with a particular Subnet, but no IP address should # be assigned. # - DHCP IP addresses are always blank. The model will look for # a DISCOVERED IP address on the same interface to map to the DHCP # IP address with `get_ip()`. if self.alloc_type == IPADDRESS_TYPE.USER_RESERVED: if not self.ip: raise ValidationError( {"ip": ["IP address must be specified."]}) if self.alloc_type == IPADDRESS_TYPE.DHCP: if self.ip: raise ValidationError( {"ip": ["IP address must not be specified."]}) if self.ip and self.subnet and self.subnet.cidr: address = self.get_ipaddress() network = self.subnet.get_ipnetwork() if address not in network: raise ValidationError({ "ip": [ "IP address %s is not within the subnet: %s." % (str(address), str(network)) ] }) def get_ipaddress(self): """Returns this StaticIPAddress wrapped in an IPAddress object. :return: An IPAddress, (or None, if the IP address is unspecified) """ if self.ip: return IPAddress(self.ip) else: return None def get_mac_addresses(self): """Return set of all MAC's linked to this ip.""" return set(interface.mac_address for interface in self.interface_set.all()) def clean(self, *args, **kwargs): super(StaticIPAddress, self).clean(*args, **kwargs) self.clean_subnet_and_ip_consistent() def validate_unique(self, exclude=None): """Overrides Django's default for validating unique columns. Django's ORM has a misfeature: `Model.validate_unique` -- which our CleanSave mix-in calls -- checks every unique key against the database before actually saving the row. Django runs READ COMMITTED by default, which means there's a racey period between the uniqueness validation check and the actual insert. """ pass def _set_subnet(self, subnet, interfaces=None): """Resets the Subnet for this StaticIPAddress, making sure to update the VLAN for a related Interface (if the VLAN has changed). 
""" self.subnet = subnet if interfaces is not None: for iface in interfaces: if (iface is not None and subnet is not None and iface.vlan_id != subnet.vlan_id): iface.vlan = subnet.vlan iface.save() def render_json(self, with_username=False, with_summary=False): """Render a representation of this `StaticIPAddress` object suitable for converting to JSON. Includes optional parameters wherever a join would be implied by including a specific piece of information.""" # Circular imports. # XXX mpontillo 2016-03-11 we should do the formatting client side. from maasserver.websockets.base import dehydrate_datetime data = { "ip": self.ip, "alloc_type": self.alloc_type, "created": dehydrate_datetime(self.created), "updated": dehydrate_datetime(self.updated), } if with_username and self.user is not None: data["user"] = self.user.username if with_summary: iface = self.get_interface() node = self.get_node() if node is not None: data["node_summary"] = { "system_id": node.system_id, "node_type": node.node_type, "fqdn": node.fqdn, "hostname": node.hostname, "is_container": node.parent_id is not None, } if iface is not None: data["node_summary"]["via"] = iface.get_name() if (with_username and self.alloc_type != IPADDRESS_TYPE.DISCOVERED): # If a user owns this node, overwrite any username we found # earlier. A node's owner takes precedence. if node.owner and node.owner.username: data["user"] = node.owner.username if len(self.dnsresource_set.all()) > 0: # This IP address is used as DNS resource. dns_records = [{ "id": resource.id, "name": resource.name, "domain": resource.domain.name, } for resource in self.dnsresource_set.all()] data["dns_records"] = dns_records if self.bmc_set.exists(): # This IP address is used as a BMC. bmcs = [{ "id": bmc.id, "power_type": bmc.power_type, "nodes": [{ "system_id": node.system_id, "hostname": node.hostname, } for node in bmc.node_set.all()], } for bmc in self.bmc_set.all()] data["bmcs"] = bmcs return data def set_ip_address(self, ipaddr, iface=None): """Sets the IP address to the specified value, and also updates the subnet field. The new subnet is determined by calling get_best_subnet_for_ip() on the SubnetManager. If an interface is supplied, the Interface's VLAN is also updated to match the VLAN of the new Subnet. """ self.ip = ipaddr # Cases we need to handle: # (0) IP address is being cleared out (remains within Subnet) # (1) IP address changes to another address within the same Subnet # (2) IP address changes to another address with a different Subnet # (3) IP address changes to an address within an unknown Subnet if not ipaddr: # (0) Nothing to be done. We're clearing out the IP address. return if self.ip and self.subnet: if self.get_ipaddress() in self.subnet.get_ipnetwork(): # (1) Nothing to be done. Already in an appropriate Subnet. return else: # (2) and (3): the Subnet has changed (could be to None) subnet = Subnet.objects.get_best_subnet_for_ip(ipaddr) # We must save here, otherwise it's possible that we can't # traverse the interface_set many-to-many. self.save() self._set_subnet(subnet, interfaces=self.interface_set.all())
class Calendar(Model): """A delivery calendar. A calendar is attached to one and only one zone. There are two types of calendars: default and exceptions. The default calendar (only one default calendar may exist per zone) does not specify a validity period, while exceptions do define a validity period (which must be non-overlapping within each zone, i.e. a zone can't have two overlapping exceptions both valid at the same time). The exceptions, when they are valid, take precedence over the default calendar. The calendar also defines *slack days*, meaning that a shipment cannot be booked for a time earlier than the time of booking plus the slack days: e.g. if ``slack_days`` is equal to two, a delivery for tomorrow can't be booked today; a later one must be booked instead. """ zone = ForeignKey(Zone, verbose_name=_(u"delivery zone")) valid_from = DateField(_(u"valid from"), blank=True, null=True, default=None) valid_to = DateField(_(u"valid to"), blank=True, null=True, default=None) slack_days = IntegerField(_(u"slack days"), validators=[MinValueValidator(0)]) def clean(self): # Checks that either both ends of the validity period are specified or # none (default calendar) if (self.valid_from is None and self.valid_to is not None) or \ (self.valid_from is not None and self.valid_to is None): raise ValidationError( _(u"You must specify both ends of the validity period or " u"none at all")) # If this is a default calendar (no validity period) checks that it is # the only one if self.valid_from is None: filter_ = Q(zone=self.zone, valid_from__isnull=True, valid_to__isnull=True) if self.pk is not None: filter_ &= ~Q(pk=self.pk) other_default = self.__class__.objects.all().filter( filter_).count() if other_default > 0: raise ValidationError( _(u"You can specify only one default calendar (with no " u"validity period) per zone")) else: # Checks that the validity period makes sense if self.valid_from > self.valid_to: raise ValidationError( _(u"The validity period starts after it ends")) filter_ = Q(zone=self.zone) filter_ &= overlapping_query(self, 'valid_from', 'valid_to') if self.pk is not None: filter_ &= ~Q(pk=self.pk) # Checks that there aren't any overlapping definitions overlapping = self.__class__.objects.all().filter(filter_).count() if overlapping > 0: raise ValidationError( _(u"There is another calendar present in the validity " u"time of the current one")) def __unicode__(self): template = _(u"%(zone)s's calendar (%(from)s - %(to)s)") data = {'zone': unicode(self.zone)} if self.valid_from is None and self.valid_to is None: template = _(u"%(zone)s's default calendar") else: data['from'] = date_format(self.valid_from) data['to'] = date_format(self.valid_to) return template % data class Meta: verbose_name = _("calendar") verbose_name_plural = _("calendars") ordering = ['-valid_from']
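A hedged sketch (the helper name calendar_for is an assumption, not part of the module) of the lookup rule described in the docstring: a valid exception calendar wins over the zone's default calendar.

def calendar_for(zone, day):
    # Hypothetical helper: prefer an exception calendar whose validity
    # period covers `day`; otherwise fall back to the default calendar.
    exception = Calendar.objects.filter(
        zone=zone, valid_from__lte=day, valid_to__gte=day).first()
    if exception is not None:
        return exception
    return Calendar.objects.filter(
        zone=zone, valid_from__isnull=True, valid_to__isnull=True).first()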
class Ad(Model): APPROVAL_CHOICES = ( ('accepted', 'قبول'), ('denied', 'رد'), ) advertiser = ForeignKey(to=Advertiser, related_name='ads', verbose_name='تبلیغ کننده', on_delete=CASCADE) approve = CharField(max_length=10, choices=APPROVAL_CHOICES, verbose_name='وضعیت', default='denied') title = CharField(max_length=100, verbose_name='موضوع') img_url = URLField(verbose_name=' ادرس عکس تبلیغ', ) link = URLField(verbose_name='ادرس سایت شما') class Meta: verbose_name = 'تبلیغ' verbose_name_plural = 'تبلیغات' def __str__(self): return str(self.advertiser) + ' : ' + str(self.title) @staticmethod def get_total_ctr(start_time, end_time): return Ad.objects.annotate( total_views=Count(F('views'), distinct=True, filter=Q(views__time__range=(start_time, end_time))) ).filter(total_views__gt=0).annotate( ctr=Cast( Count(F('clicks'), distinct=True, filter=Q(clicks__time__range=( start_time, end_time))), FloatField()) / Cast( Count(F('views'), distinct=True, filter=Q(views__time__range=(start_time, end_time)), output_field=FloatField()), FloatField())).order_by( '-ctr') @staticmethod def get_total_clicks_views(start_time, end_time, delta): ads = Ad.objects.all() response = dict() for ad in ads: ad_response = list() start = start_time while start < end_time: end = start + timezone.timedelta(hours=delta) d = ad.get_dict_in_time_range(start_time=start, end_time=end) ad_response.append(d) start = end response.update({ad: ad_response}) return response def get_dict_in_time_range(self, start_time, end_time): clicks_count = self.clicks.filter(time__range=(start_time, end_time)).count() views_count = self.views.filter(time__range=(start_time, end_time)).count() d = dict() d.update({ 'start_time': start_time, 'end_time': end_time, 'total_clicks': clicks_count, 'total_view': views_count, }) return d def get_closest_view(self, ip, time): return self.views.filter(ip=ip, time__lt=time).order_by('-time')[0]
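An assumed call-site sketch for the two reporting helpers above; the 24-hour window and 6-hour bucket size are only illustrative.

from django.utils import timezone

end = timezone.now()
start = end - timezone.timedelta(hours=24)
# Ads ordered by click-through rate over the window.
ads_by_ctr = Ad.get_total_ctr(start, end)
# Per-ad click/view counts, bucketed into 6-hour slices.
per_ad_series = Ad.get_total_clicks_views(start, end, delta=6)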
class Mission(Model): """A delivery mission. A delivery mission starts at a given time, on a given day, and visits the zone it is attached to via its calendar. It physically corresponds to the delivery truck arriving in the zone on the given day (e.g. "Tuesday") and staying there from ``starts`` to ``ends`` (e.g. from 08:00 to 12:00) to make the deliveries. All the associated shipment addresses will be visited in that timespan, according to the most convenient route for the driver (there is no specific delivery hour: it can be any time within the given period). It also defines a maximum number of shipments for the mission (e.g. the number of actual deliveries the driver can perform in the allowed time). This is later used in scheduling. """ calendar_day = ForeignKey(CalendarDay, verbose_name=_(u"calendar day")) starts = TimeField(_(u"starts at")) ends = TimeField(_(u"ends at")) shipments = IntegerField(_(u"maximum shipments"), validators=[MinValueValidator(0)]) def clean(self): # Checks the delivery period makes sense if self.starts > self.ends: raise ValidationError(_(u"The mission starts after it ends")) # We cannot have overlapping missions: there is no concept of # concurrent deliveries by multiple trucks filter_ = Q(calendar_day=self.calendar_day) filter_ &= overlapping_query(self, 'starts', 'ends') if self.pk is not None: filter_ &= ~Q(pk=self.pk) overlapping = self.__class__.objects.all().filter(filter_).count() if overlapping > 0: raise ValidationError( _(u"There is another mission to the same zone with a " u"conflicting schedule")) def remaining_shipments(self, date): """Returns the remaining shipments for a given day. Calculates how many "free delivery slots" there are for a given mission on a given day (e.g. "Tue Dec 20th, 2011"), by looking at the booked shipments for that day and subtracting those from the maximum number of possible shipments for that mission. """ shippings_that_day = Shipment.objects.all().filter(mission=self, date=date).count() return self.shipments - shippings_that_day def __unicode__(self): return _(u"%(start)s - %(end)s, %(day)s") % { 'day': unicode(self.calendar_day), 'start': time_format(self.starts), 'end': time_format(self.ends) } class Meta: verbose_name = _("mission") verbose_name_plural = _("missions") ordering = ['starts']
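A sketch, under stated assumptions, combining the slack-day rule from Calendar with remaining_shipments(); the helper can_book is hypothetical and the calendar is passed in explicitly.

import datetime

def can_book(mission, calendar, day, today=None):
    # Hypothetical check: the requested day must be outside the calendar's
    # slack window and the mission must still have a free delivery slot.
    today = today or datetime.date.today()
    earliest = today + datetime.timedelta(days=calendar.slack_days)
    return day >= earliest and mission.remaining_shipments(day) > 0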
class Pupitre(CommonModel): oeuvre = ForeignKey('Oeuvre', related_name='pupitres', verbose_name=_('œuvre'), on_delete=CASCADE) partie = ForeignKey('Partie', related_name='pupitres', verbose_name=_('rôle ou instrument'), on_delete=PROTECT) soliste = BooleanField(_('soliste'), default=False, db_index=True) quantite_min = IntegerField(_('quantité minimale'), default=1) quantite_max = IntegerField(_('quantité maximale'), default=1) facultatif = BooleanField( _('ad libitum'), default=False, ) objects = PupitreManager() class Meta(object): verbose_name = _('pupitre') verbose_name_plural = _('pupitres') ordering = ('-soliste', 'partie') @staticmethod def invalidated_relations_when_saved(all_relations=False): return ('oeuvre', ) def __str__(self): n_min = self.quantite_min n_max = self.quantite_max partie = self.partie.html(pluriel=n_max > 1, oeuvre=False, tags=False) if n_min != n_max: out = ugettext('%s à %s %s') % (apnumber(n_min), apnumber(n_max), partie) elif n_min > 1: out = f'{apnumber(n_min)} {partie}' else: out = partie if self.facultatif: out = format_html('{} <em>ad libitum</em>', out) return out def get_absolute_url(self): return self.partie.get_absolute_url() def html(self, tags=True): return href(self.get_absolute_url(), force_text(self), tags=tags) def related_label(self): out = force_text(self) if self.partie.oeuvre is not None: out += f' ({self.partie.oeuvre})' return out @staticmethod def autocomplete_search_fields(): return ( 'partie__nom__unaccent__icontains', 'partie__nom_pluriel__unaccent__icontains', 'partie__professions__nom__unaccent__icontains', 'partie__professions__nom_pluriel__unaccent__icontains', )
class Zone(Model): """A delivery zone. The city is divided into multiple zones, and delivery scheduling takes the zone as the atomic delivery destination used for semantic grouping of deliveries. """ site = ForeignKey(Site, verbose_name=_(u"site")) name = CharField(_(u"zone name"), max_length=256) active = BooleanField(_(u"active"), default=False) def default_calendar(self): try: return self.calendar_set.get(valid_from__isnull=True, valid_to__isnull=True) except Calendar.DoesNotExist: pass return None def check_validity(self): """Checks the "validity" of the zone. For a zone, being valid means that: * The default calendar exists * The default calendar has (at least) one day associated with it and that this contains (at least) one mission Should one of the two fail, ``ValidationError`` is raised. """ default_calendar = self.default_calendar() # Check that we have a default calendar if default_calendar is None: raise ValidationError( _(u"%(zone)s doesn't have a default calendar associated, " u"please add it") % {'zone': self.name}) # Checks that we have at least one mission associated with one of the # days missions = Mission.objects.all().filter( calendar_day__calendar=default_calendar).count() if missions == 0: raise ValidationError( _(u"%(zone)s's default calendar is void, please add at least " u"one mission to it") % {'zone': self.name}) def validate(self): """If the zone is active, checks that it is still valid. Uses ``check_validity`` to check that the zone is still valid: if it isn't, the ``active`` property is set to ``False`` and the zone is saved. Returns the state of ``active`` after the check. """ if not self.active: return self.active try: self.check_validity() except ValidationError: self.active = False self.save() return self.active def activate(self): """Activates the zone. It uses ``check_validity`` beforehand to check whether activation is indeed possible. """ self.check_validity() self.active = True self.save() def deactivate(self): self.active = False self.save() def __unicode__(self): return self.name class Meta: verbose_name = _("zone") verbose_name_plural = _("zones") ordering = ['name']
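An assumed usage sketch of the activation workflow: activate() re-runs check_validity(), so a caller can surface the failure reason instead of letting the exception propagate. The wrapper try_activate is hypothetical.

from django.core.exceptions import ValidationError

def try_activate(zone):
    # Hypothetical wrapper returning (succeeded, error messages).
    try:
        zone.activate()
        return True, []
    except ValidationError as exc:
        return False, exc.messages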
class ForumBoard(Model): section = ForeignKey(ForumSection, on_delete=PROTECT, null=False, related_name='boards') title = CharField(max_length=128, null=False, blank=False) description = TextField(null=False) read_perm = ForeignKey(Permission, on_delete=PROTECT, null=True, related_name='readable_boards') write_perm = ForeignKey(Permission, on_delete=PROTECT, null=True, related_name='writable_boards') sort_index = IntegerField(default=0, null=False) deleted = BooleanField(default=False, null=False) def __str__(self) -> str: return self.title def can_read(self, user: User) -> bool: if self.read_perm is None: return True return has_perm_obj(user, self.read_perm) def can_write(self, user: User) -> bool: if not user.is_authenticated: return False if self.write_perm is None: return True return has_perm_obj(user, self.write_perm) def visible_threads(self, user: User = None) -> QuerySet: qs = self.threads.filter(deleted=False)\ .select_related('user', 'user__profile')\ .order_by('-sticky', '-modified_at') # Post count for the threads post_count_sq = ForumPost.objects\ .filter(thread=OuterRef('pk'), deleted=False).values('pk') qs = qs.annotate(total_posts=SQCount(post_count_sq)) # Latest post ID for the threads latest_post_sq = ForumPost.objects.filter( thread=OuterRef('pk'), deleted=False).order_by('-id').values('pk')[:1] qs = qs.annotate(latest_post_id=Subquery(latest_post_sq)) # Find out if threads have new content since last visit if user and user.is_authenticated: limit_ts = user.profile.last_all_read reads_sq = ForumLastRead.objects\ .filter(user=user, thread=OuterRef('pk'), created_at__gt=OuterRef('modified_at'))\ .values('thread') threads_sq = ForumThread.objects\ .filter(pk=OuterRef('pk'), deleted=False, modified_at__gt=limit_ts)\ .exclude(pk__in=Subquery(reads_sq))\ .values('pk') qs = qs.annotate(new_posts_count=SQCount(threads_sq)) return qs def get_latest_posts(self, ids: typing.List[int]) -> dict: sq = ForumPost.objects.filter(thread__board=self.pk, deleted=False).values('pk') qs = ForumPost.objects.filter(pk__in=ids)\ .select_related('user', 'user__profile', 'thread')\ .annotate(post_count=SQCount(sq)) return {x.id: x for x in qs.all()} class Meta: app_label = 'forum' indexes = [ Index(fields=['section', 'sort_index', 'deleted']), Index(fields=['deleted']), ]
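A sketch of how a caller might combine visible_threads() with get_latest_posts(); the function name and the returned pairing are assumptions, not part of the module.

def thread_listing(board, user):
    # Hypothetical glue code: the latest_post_id annotated by
    # visible_threads() keys into the dict built by get_latest_posts().
    threads = list(board.visible_threads(user))
    latest_ids = [t.latest_post_id for t in threads if t.latest_post_id]
    latest_posts = board.get_latest_posts(latest_ids)
    return [(t, latest_posts.get(t.latest_post_id)) for t in threads]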
class Filesystem(CleanSave, TimestampedModel): """A filesystem on partition or a block device. :ivar uuid: UUID of the filesystem. :ivar fstype: Type of filesystem. This can even be filesystems that cannot be mounted directly, e.g. LVM. :ivar partition: `Partition` this filesystem is on. If empty the filesystem must be directly on a `BlockDevice`. :ivar block_device: `BlockDevice` this filesystem is on. If empty the filesystem must be on a `Partition`. :ivar filesystem_group: `FilesystemGroup` this filesystem belongs to. :ivar create_params: Parameters that can be passed during the `mkfs` command when the filesystem is created. :ivar mount_point: Path to where this filesystem is mounted on the deployed operating system. :ivar mount_options: Parameters that are used to mount this filesystem on the deployed operating system. """ # All filesystem types. TYPES = frozenset(fstype for fstype, _ in FILESYSTEM_TYPE_CHOICES) # Filesystem types that expect to be mounted into the host's filesystem. # Essentially this means all filesystems except swap. TYPES_REQUIRING_MOUNT_POINT = frozenset( fstype for fstype, _ in FILESYSTEM_TYPE_CHOICES if fstype != FILESYSTEM_TYPE.SWAP) # Filesystem types that require storage on a block special device, i.e. a # block device or partition. TYPES_REQUIRING_STORAGE = frozenset( fstype for fstype, _ in FILESYSTEM_TYPE_CHOICES if fstype != FILESYSTEM_TYPE.RAMFS and fstype != FILESYSTEM_TYPE.TMPFS) class Meta(DefaultMeta): """Needed for South to recognize this model.""" unique_together = ( ("partition", "acquired"), ("block_device", "acquired"), ) objects = FilesystemManager() uuid = CharField(max_length=36, unique=False, null=False, blank=False, editable=False) fstype = CharField( max_length=20, choices=FILESYSTEM_TYPE_CHOICES, default=FILESYSTEM_TYPE.EXT4, ) partition = ForeignKey(Partition, unique=False, null=True, blank=True, on_delete=CASCADE) block_device = ForeignKey(BlockDevice, unique=False, null=True, blank=True, on_delete=CASCADE) node = ForeignKey( "Node", unique=False, null=True, blank=True, related_name="special_filesystems", on_delete=CASCADE, ) # XXX: For CharField, why allow null *and* blank? Would # CharField(null=False, blank=True, default="") not work better? label = CharField(max_length=255, null=True, blank=True) filesystem_group = ForeignKey( FilesystemGroup, null=True, blank=True, related_name="filesystems", on_delete=CASCADE, ) # XXX: For CharField, why allow null *and* blank? Would # CharField(null=False, blank=True, default="") not work better? create_params = CharField(max_length=255, null=True, blank=True) # XXX: For CharField, why allow null *and* blank? Would # CharField(null=False, blank=True, default="") not work better? mount_point = CharField(max_length=255, null=True, blank=True) # XXX: For CharField, why allow null *and* blank? Would # CharField(null=False, blank=True, default="") not work better? mount_options = CharField(max_length=255, null=True, blank=True) cache_set = ForeignKey( CacheSet, null=True, blank=True, related_name="filesystems", on_delete=CASCADE, ) # When a node is allocated all Filesystem objects assigned to that node # with mountable filesystems will be duplicated with this field set to # True. This allows a standard user to change this object as they want # and format other free devices. Once the node is released these objects # will be deleted. 
acquired = BooleanField(default=False) def get_node(self): """`Node` this filesystem belongs to.""" if self.partition is not None: return self.partition.get_node() elif self.block_device is not None: return self.block_device.node elif self.node is not None: return self.node else: # XXX: Explode instead? return None def get_physical_block_devices(self): """Return PhysicalBlockDevices backing the filesystem.""" from maasserver.models.virtualblockdevice import VirtualBlockDevice devices = [] parent = self.get_parent() if isinstance(parent, PhysicalBlockDevice): devices.append(parent) elif isinstance(parent, VirtualBlockDevice): for grandparent in parent.get_parents(): if isinstance(grandparent, Partition): grandparent = grandparent.partition_table.block_device device = grandparent.actual_instance if isinstance(device, PhysicalBlockDevice): devices.append(device) return devices def get_size(self): """Size of filesystem.""" if self.partition is not None: return self.partition.size elif self.block_device is not None: return self.block_device.size else: # XXX: Return None instead? return 0 def get_block_size(self): """Block size of partition table.""" if self.partition is not None: return self.partition.get_block_size() elif self.block_device is not None: return self.block_device.block_size else: # XXX: Return None instead? return 0 def get_parent(self): """Return linked `BlockDevice` or linked `Partition`.""" if self.partition is not None: return self.partition elif self.block_device is not None: return self.block_device.actual_instance elif self.node is not None: return self.node else: # XXX: Explode instead? return None @property def is_mountable(self): """Return True if this is a mountable filesystem.""" return self.fstype in FILESYSTEM_FORMAT_TYPE_CHOICES_DICT @property def is_mounted(self): """Return True if this filesystem is mounted.""" return self.mount_point is not None @property def uses_mount_point(self): """True if this filesystem can be mounted on a path. Swap partitions, for example, are not mounted at a particular point in the host's filesystem. """ return self.fstype in self.TYPES_REQUIRING_MOUNT_POINT @property def uses_storage(self): """True if this filesystem expects a block special device. ramfs and tmpfs, for example, exist only in memory. """ return self.fstype in self.TYPES_REQUIRING_STORAGE def clean(self, *args, **kwargs): super().clean(*args, **kwargs) parents = self.partition, self.block_device, self.node # You have to specify either a partition, block device, or node. if parents.count(None) == len(parents): if self.uses_storage: raise ValidationError( "One of partition or block device must be specified.") else: raise ValidationError("A node must be specified.") # You can have only one of partition, block device, or node. if len(parents) - parents.count(None) > 1: raise ValidationError( "Only one of partition, block device, or node can " "be specified.") # If fstype is for a bcache as a cache device it needs to be in a # cache_set. if (self.fstype == FILESYSTEM_TYPE.BCACHE_CACHE and self.cache_set is None): raise ValidationError( # XXX: Message leaks implementation details ("BCACHE_CACHE", # "cache_set"). "BCACHE_CACHE must be inside of a cache_set.") # Normalise the mount point to None or "none" if this filesystem does # not use it. The mount point (fs_file) field in fstab(5) is ignored # for filesystems that don't have a mount point (i.e. swap) and "none" # should be used, so it's used here too. 
# When the mount point is set # to None (rather than the string "none") it means that the filesystem # is unmounted. This overloading is going to catch us out one day. if not self.uses_mount_point: if self.mount_point is not None: self.mount_point = "none" # You cannot place a filesystem directly on the boot_disk. It requires # a partition to be used. if self.block_device is not None: node = self.block_device.node boot_disk = node.get_boot_disk() if boot_disk is not None and boot_disk.id == self.block_device.id: # This is the boot disk for the node. raise ValidationError( "Cannot place filesystem directly on the boot disk. " "Create a partition on the boot disk first and then " "format the partition.") # Only ramfs and tmpfs can have a node as a parent. if self.uses_storage: if self.node is not None: raise ValidationError("A %s filesystem must be placed on a " "block device or partition." % self.fstype) else: if self.node is None: raise ValidationError( "RAM-backed filesystems cannot be placed on " "block devices or partitions.") # Non-storage filesystems MUST be mounted. if (not self.uses_storage) and (not self.is_mounted): raise ValidationError("RAM-backed filesystems must be mounted.") # There should be no duplicate mount points. if self.is_mounted and self.uses_mount_point: # Find another filesystem that's mounted at the same point. owning_node_other_matching_mount_point = ( Filesystem.objects.filter_by_node(self.get_node()).filter( mount_point=self.mount_point, acquired=self.acquired).exclude(id=self.id)) if owning_node_other_matching_mount_point.exists(): raise ValidationError( "Another filesystem is already mounted at %s." % (self.mount_point, )) def save(self, *args, **kwargs): if not self.uuid: self.uuid = uuid4() super().save(*args, **kwargs)
class InstanceOptions(AwsOptions): auto_created = False app_label = 'aws_duck' model_name = 'instance' verbose_name = 'EC2 instance' verbose_name_raw = 'EC2 instance' verbose_name_plural = 'EC2 instances' object_name = 'instance' default_related_name = None _aws_pk_field = 'id' _aws_fields = { 'id': AwsAutoField(), 'ami_launch_index': CharField(max_length=200, editable=False), 'architecture': CharField(max_length=200, editable=False), 'block_device_mappings': CharField(max_length=200, editable=False), 'client_token': CharField(max_length=200, editable=False), 'ebs_optimized': CharField(max_length=200, editable=False), 'hypervisor': CharField(max_length=200, editable=False), 'iam_instance_profile': CharField(max_length=200, editable=False), 'image_id': CharField(max_length=200, editable=False), 'instance_id': CharField(max_length=200, editable=False), 'instance_lifecycle': CharField(max_length=200, editable=False), 'instance_type': CharField(max_length=200, editable=False), 'kernel_id': CharField(max_length=200, editable=False), 'key_name': CharField(max_length=200, editable=False), 'launch_time': CharField(max_length=200, editable=False), 'monitoring': CharField(max_length=200, editable=False), 'network_interfaces': CharField(max_length=200, editable=False), 'placement': CharField(max_length=200, editable=False), 'platform': CharField(max_length=200, editable=False), 'private_dns_name': CharField(max_length=200, editable=False), 'private_ip_address': CharField(max_length=200, editable=False), 'product_codes': CharField(max_length=200, editable=False), 'public_dns_name': CharField(verbose_name='public DNS name', max_length=200, editable=False), 'public_ip_address': CharField(verbose_name='public IP address', max_length=200, editable=False), 'ramdisk_id': CharField(max_length=200, editable=False), 'root_device_name': CharField(max_length=200, editable=False), 'root_device_type': CharField(max_length=200, editable=False), 'security_groups': CharField(max_length=200, editable=False), 'source_dest_check': CharField(max_length=200, editable=False), 'spot_instance_request_id': CharField(max_length=200, editable=False), 'sriov_net_support': CharField(max_length=200, editable=False), 'state': CharField(max_length=200, editable=False), 'state_reason': CharField(max_length=200, editable=False), 'state_transition_reason': CharField(max_length=200, editable=False), 'subnet_id': CharField(max_length=200, editable=False), 'tags': CharField(max_length=200, editable=False), 'virtualization_type': CharField(max_length=200, editable=False), 'vpc_id': CharField(max_length=200, editable=False), } def _bind(self): super()._bind() from .models import Image, Instance self.thread = ForeignKey(Image) self.thread.contribute_to_class(Image, 'ami') self.concrete_model = Instance self._aws_other_fields['thread'] = self.thread
def __init__(self, **kwargs): kwargs.setdefault('to', get_user_model()) kwargs.setdefault('null', True) kwargs.setdefault('blank', True) ForeignKey.__init__(self, **kwargs)
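A self-contained sketch of the pattern in the __init__ above, assuming it belongs to a ForeignKey subclass; the class name OptionalUserField and the Note model are illustrative only.

from django.contrib.auth import get_user_model
from django.db.models import CASCADE, ForeignKey, Model

class OptionalUserField(ForeignKey):
    # Hypothetical field: defaults `to` to the configured user model and
    # makes the relation optional; on_delete must still be supplied.
    def __init__(self, **kwargs):
        kwargs.setdefault('to', get_user_model())
        kwargs.setdefault('null', True)
        kwargs.setdefault('blank', True)
        super().__init__(**kwargs)

class Note(Model):
    author = OptionalUserField(on_delete=CASCADE, related_name='notes')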