class Article(Document):
    """A published article: author, title, body, tags and creation date."""

    author = StringField(max_length=30, required=True)
    title = StringField(max_length=120, required=True)
    content = StringField(required=True)
    tags = ListField(StringField(max_length=30))
    # Callable default — evaluated per document (naive local time).
    date = DateTimeField(default=datetime.datetime.now)
class Post(DynamicDocument):
    """A post; DynamicDocument allows arbitrary extra fields."""

    title = StringField(max_length=200, required=True)
    date_modified = DateTimeField(default=datetime.datetime.now)
class Job(MongoModel, Document):
    """Scheduled job document mirroring ``brewtils.models.Job``.

    ``clean`` enforces that the embedded ``trigger`` document matches the
    declared ``trigger_type`` before the job is saved.
    """

    brewtils_model = brewtils.models.Job

    meta = {
        "auto_create_index": False,
        "index_background": True,
        "indexes": [
            {
                "name": "next_run_time_index",
                "fields": ["next_run_time"],
                "sparse": True,
            },
            {
                "name": "job_system_fields",
                "fields": [
                    "request_template.namespace",
                    "request_template.system",
                    "request_template.system_version",
                ],
            },
        ],
    }

    # Maps each trigger_type string to its embedded trigger document class.
    TRIGGER_MODEL_MAPPING = {
        "date": DateTrigger,
        "cron": CronTrigger,
        "interval": IntervalTrigger,
        "file": FileTrigger,
    }

    name = StringField(required=True)
    trigger_type = StringField(required=True, choices=BrewtilsJob.TRIGGER_TYPES)
    trigger = GenericEmbeddedDocumentField(
        choices=list(TRIGGER_MODEL_MAPPING.values()))
    request_template = EmbeddedDocumentField("RequestTemplate", required=True)
    misfire_grace_time = IntField()
    coalesce = BooleanField(default=True)
    next_run_time = DateTimeField()
    success_count = IntField(required=True, default=0, min_value=0)
    error_count = IntField(required=True, default=0, min_value=0)
    status = StringField(required=True,
                         choices=BrewtilsJob.STATUS_TYPES,
                         default="RUNNING")
    max_instances = IntField(default=3, min_value=1)
    timeout = IntField()

    def clean(self):
        """Validate before saving to the database"""
        # Reject trigger types we have no mongo model for.
        if self.trigger_type not in self.TRIGGER_MODEL_MAPPING:
            raise ModelValidationError(
                f"Cannot save job. No mongo model for trigger type {self.trigger_type}"
            )

        # The embedded trigger document must match the declared type.
        trigger_class = self.TRIGGER_MODEL_MAPPING.get(self.trigger_type)
        if not isinstance(self.trigger, trigger_class):
            raise ModelValidationError(
                f"Cannot save job. Expected trigger type {self.trigger_type} but "
                f"actual type was {type(self.trigger)}")
class RequestLog(Document):
    """Audit record of an HTTP request: user, raw body and headers."""

    user = StringField(required=True)
    request_body = StringField(required=True)
    request_headers = StringField(required=True)
    date = DateTimeField(default=datetime.datetime.utcnow)
    # Custom queryset manager for this collection.
    objects = QuerySetManager()
class DateDoc(Document):
    """Minimal document holding a single required datetime."""

    the_date = DateTimeField(required=True)
class Job(db.Document):
    """A job belonging to a Task, carrying its execution history."""

    created_at = DateTimeField(required=True)
    last_modified_at = DateTimeField(required=True,
                                     default=datetime.datetime.now)
    job_id = StringField(required=True)
    executions = ListField(EmbeddedDocumentField(JobExecution))
    task = ReferenceField("Task",
                          required=True,
                          reverse_delete_rule=mongoengine.CASCADE)
    metadata = DictField(required=False)
    scheduled = BooleanField(required=True, default=False)

    def save(self, *args, **kwargs):
        """Persist the job, backfilling timestamps and the executions list."""
        if self.executions is None:
            self.executions = []
        if not self.created_at:
            self.created_at = datetime.datetime.utcnow()
        # Always refreshed on every save.
        self.last_modified_at = datetime.datetime.utcnow()
        return super(Job, self).save(*args, **kwargs)

    def create_execution(self, image, command):
        """Append a new JobExecution with a fresh UUID, save, and return it."""
        ex_id = str(uuid4())
        ex = JobExecution(
            execution_id=ex_id,
            image=image,
            command=command,
            created_at=datetime.datetime.utcnow(),
        )
        self.executions.append(ex)
        self.save()
        return ex

    def get_metadata(self, blacklist):
        """Return metadata with env-var values masked for blacklisted key words.

        NOTE(review): assumes ``self.metadata`` is a dict — would raise if it
        were ever None; confirm DictField always yields a dict here.
        """
        if "envs" in self.metadata:
            envs = {}
            for key, val in self.metadata["envs"].items():
                for word in blacklist:
                    if word in key.lower():
                        # Mask the value but preserve its length.
                        val = "*" * len(str(val))
                        break
                envs[key] = val
            self.metadata["envs"] = envs
        return self.metadata

    def to_dict(
            self,
            include_log=False,
            include_error=False,
            include_executions=True,
            blacklist=None,
    ):
        """Serialize the job (and optionally its executions) to a plain dict."""
        if blacklist is None:
            blacklist = []
        meta = self.get_metadata(blacklist)
        res = {
            "createdAt": self.created_at.isoformat(),
            "lastModifiedAt": self.last_modified_at.isoformat(),
            "taskId": self.task.task_id,
            "scheduled": self.scheduled,
            "executionCount": len(self.executions),
            "metadata": meta,
        }
        if include_executions:
            executions = [
                ex.to_dict(include_log, include_error)
                for ex in self.executions
            ]
            res["executions"] = executions
        return res

    @classmethod
    def get_by_id(cls, task_id, job_id):
        """Look up a job by task_id and job_id; both must be non-empty."""
        from fastlane.models.task import Task

        if task_id is None or task_id == "" or job_id is None or job_id == "":
            raise RuntimeError(
                "Task ID and Job ID are required and can't be None or empty.")

        t = Task.objects(task_id=task_id).first()
        j = cls.objects(task=t, job_id=job_id).first()
        return j

    def get_execution_by_id(self, execution_id):
        """Return the execution with the given id, or None if absent."""
        for job_execution in self.executions:
            if job_execution.execution_id == execution_id:
                return job_execution
        return None

    def get_last_execution(self):
        """Return the most recently appended execution, or None if empty."""
        if not self.executions:
            return None
        return self.executions[-1]
class User(Document):
    """Basic user account record."""

    email = StringField(required=True)
    name = StringField(max_length=32, required=True)
    # NOTE(review): stored as a plain StringField (max 16 chars) — looks like
    # a raw password; confirm hashing happens before assignment.
    password = StringField(max_length=16, required=True)
    birthday = DateTimeField(required=True)
class Links(Document):
    """A LinkedIn link and when it was last fetched."""

    meta = {'collection': 'linkedin_links'}

    url = StringField()
    # Defaults to datetime.min — presumably meaning "never fetched".
    last_fetch_at = DateTimeField(default=datetime.min)
class AssignmentAnswer(Document):
    """A submitted set of answers for an assignment, with points earned."""

    assignment = ReferenceField(Assignment, required=True)
    answers = ListField(StringField(), required=True)
    points_get = ListField(IntField())
    # Assignment submission time (original comment: 作业上交时间)
    submit_time = DateTimeField(required=True)
class AdminBlog(Document):
    """A blog entry authored by a (referenced) User."""

    title = StringField(max_length=128, required=True)
    text = StringField(required=True)
    author = ReferenceField('User')
    # Creation time; callable default evaluated per document.
    ctime = DateTimeField(default=datetime.now)
class SessionConfigs(Document):
    """Per-bot/per-user session configuration."""

    # NOTE(review): field name has a typo ("sesssion"); left as-is because
    # renaming would change the stored document key and break existing data.
    sesssionExpirationTime = LongField(required=True, default=60)
    carryOverSlots = BooleanField(required=True, default=True)
    bot = StringField(required=True)
    user = StringField(required=True)
    timestamp = DateTimeField(default=datetime.utcnow)
class Album(MusicCollection):
    """A music collection that additionally carries a release date."""

    release_date = DateTimeField()
class DateTimeAttr(ValueAttr):
    """Attribute whose value is a datetime."""

    value = DateTimeField()

    def getKeywords(self):
        # Datetime values contribute no keywords.
        return []
class StudentGroup(EmbeddedDocument):
    """Embedded named group of referenced students."""

    name = StringField(required=True)
    members = ListField(ReferenceField(Student))
    datetime_modified = DateTimeField(default=datetime.now)
class Vote(InheritableEmbeddedDocument):
    """
    A class to be used to store simple user sentiment.
    """

    user = StringField()
    created = DateTimeField(
        help_text="When the user voted (liked or disliked) the object.",
        default=datetime.datetime.utcnow)
class Contact(Document):
    """A phone-book contact (name optional, phone required)."""

    name = StringField(max_length=200, required=False)
    phone = StringField(max_length=200, required=True)
    date_modified = DateTimeField(default=datetime.datetime.now)
class User(JsonMixin, DynamicDocument):
    """ An account on 1base. """

    email = EmailField(max_length=1024, required=True, unique=True)
    password = StringField(max_length=1027, required=True)
    username = StringField(max_length=120, required=False, unique=True,
                           sparse=True)
    # Callable default: the original `default=list()` built ONE list at import
    # time that every document would share.
    groups = ListField(ReferenceField(Group), default=list)

    # attributes required by flask-login
    # https://flask-login.readthedocs.io/en/latest/
    # `is_authenticated` and `is_anonymous` are attributes in constructor
    is_active = BooleanField(default=False)
    # Callable default: the original `default=token_urlsafe()` generated ONE
    # token at import time, so every user shared the same verification string.
    verification = StringField(default=token_urlsafe)

    # Personal "business" information
    sex = StringField(choices=SEX_CHOICES)
    first_name = StringField(max_length=1024)
    middle_name = StringField(max_length=1024)
    last_name = StringField(max_length=1024)
    birth_date = DateTimeField()
    phone_number = StringField(max_length=48)

    # Developer information
    api_key = StringField()

    # Frilly information
    avatar = ForgivingURLField()

    def __init__(self, *args, **kwargs):
        """ Construct a new user. """
        self.is_authenticated = False
        self.is_anonymous = False
        super(User, self).__init__(*args, **kwargs)

    def get_id(self):
        """ Get ID of the user as a str.

        Required by `flask-login
        <https://flask-login.readthedocs.io/en/latest/>`_, which expects a
        string. The original ``self.id.encode('unicode')`` always raised:
        'unicode' is not a codec and the id object has no ``encode``.
        """
        return str(self.id)

    def generate_api_key(self):
        """Rotate the API key with a fresh URL-safe token."""
        self.api_key = token_urlsafe()

    @property
    def all_permissions(self):
        """ Return all distinct permissions granted via the user's groups. """
        perms = []
        for g in self.groups:
            perms.extend(g.permissions)
        # De-duplicate while preserving order; the original
        # ``list(tuple(perms))`` round-trip removed nothing.
        return list(dict.fromkeys(perms))

    @property
    def is_admin(self):
        """ Return True if user is in any ADMIN_GROUPS group. """
        return any(g.name in ADMIN_GROUPS for g in self.groups)

    def can_any(self, *permissions):
        """ Return True if user can use any of the permissions.

        NOTE: By default will return True if group name is listed in
        onebase_common.settings.ADMIN_GROUPS
        """
        if self.is_admin:
            return True
        ap = self.all_permissions
        return any(p in ap for p in permissions)

    def can_all(self, *permissions):
        """ Return True if user can use *all* permissions listed.

        NOTE: By default will return True if group name is listed in
        onebase_common.settings.ADMIN_GROUPS

        :param permissions: Permissions for the user.
        """
        if self.is_admin:
            return True
        ap = self.all_permissions
        return all(p in ap for p in permissions)

    def to_json(self):
        """Serialize the user, omitting the password field."""
        return super(User, self).to_json(omit=['password'])
class ContactUser(Document):
    """Associates a contact JID with a user id."""

    contact_jid = StringField(max_length=200, required=True)
    user_id = StringField(max_length=200, required=True)
    date_modified = DateTimeField(default=datetime.datetime.now)
class FeatureModel(BaseModel):
    """Feature / to-do tracking record (original docstring: 待办跟踪)."""

    title = StringField(required=True, verbose_name='标题', max_length=50,
                        min_length=2)
    desc = StringField(required=True, verbose_name='需求描述', min_length=2)
    demander = StringField(required=True, verbose_name='需求方', min_length=2)
    priority = StringField(required=True, verbose_name='优先级',
                           choices=PriorityEnum.choices(),
                           default=PriorityEnum.LOW)
    status = StringField(required=True, verbose_name='需求状态',
                         choices=ProcessStatusEnum.choices(),
                         default=ProcessStatusEnum.UN_STARTED)
    expect_deadline = DateTimeField(required=True, verbose_name='预计完成时间')
    actual_deadline = DateTimeField(required=False, verbose_name='实际完成时间',
                                    null=True)
    submitter = ListField(required=True, verbose_name='提交人')
    implementer = StringField(required=True, verbose_name='执行人')

    meta = {
        'collection': 'feature',
        'verbose_name': '待办项跟踪',
        'indexes': [
            'submitter',
            'implementer',
            'updated_time',
            'created_time',
        ]
    }

    @property
    def status_render(self):
        """Human-readable label for the process status."""
        return ProcessStatusEnum[self.status].desc

    @property
    def priority_render(self):
        """Human-readable label for the priority."""
        return PriorityEnum[self.priority].desc

    @property
    def expect_deadline_render(self):
        """Expected deadline formatted as a string."""
        return time2str(self.expect_deadline)

    @property
    def actual_deadline_render(self):
        """Actual deadline formatted as a string, or '-' when unset."""
        if self.actual_deadline:
            return time2str(self.actual_deadline)
        return '-'

    @property
    def submitter_render(self):
        """Comma-joined display names of submitters, resolved from emails."""
        if not self.submitter:
            return '-'
        user_info = UserService.batch_get_user_by_email(self.submitter)
        names = []
        for email in self.submitter:
            name = (user_info.get(email) or {}).get('name')
            if name:
                names.append(name)
        return ','.join(names) or '-'
class Message(Document):
    """A named, slugged message record."""

    name = StringField(max_length=200, required=True)
    slug = StringField(max_length=200, required=True)
    message = StringField()
    date_modified = DateTimeField(default=datetime.datetime.now)
class EBAP(Request):
    """Request type: 'Eliminación de la historia académica BAPI'.

    Renders council (cm) and pre-council (pcm) paragraphs into python-docx
    documents using the Spanish template strings below.
    """

    full_name = 'Eliminación de la historia académica BAPI'

    commite_cm = IntField(default=1, display='Acta de comité')
    commite_cm_date = DateTimeField(display='Fecha acta de comité',
                                    default=datetime.date.today)

    regulation_list = ['008|2008|CSU']  # List of regulations

    str_cm = [
        'eliminar la historia académica BAPI, debido a que {}.',
    ]

    str_pcm = [
        'Modalidad de trabajo de grado: Asignaturas de posgrado. Acta de comité {}, del {} de {} ' +
        'del {}.'
    ]

    def cm(self, docx):
        """Write the council (CM) paragraph: header, answer, regulation ref."""
        paragraph = docx.add_paragraph()
        paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
        paragraph.paragraph_format.space_after = Pt(0)
        paragraph.add_run(self.str_council_header + ' ')
        self.cm_answer(paragraph)
        paragraph.add_run(' ({}). '.format(
            self.regulations[self.regulation_list[0]][0]))

    def cm_answer(self, paragraph):
        """Append the bolded approval status plus the templated decision text."""
        paragraph.add_run(
            # pylint: disable=no-member
            self.get_approval_status_display().upper() + ' ').font.bold = True
        # NOTE(review): str_cm[0] already ends with '.', so the appended '.'
        # produces a double period — confirm whether intended.
        paragraph.add_run(self.str_cm[0].format(
            '' if self.is_affirmative_response_approval_status() else 'no ') + '.')

    def pcm(self, docx):
        """Write the pre-council (PCM) analysis and answer paragraphs."""
        self.pcm_analysis(docx)
        paragraph = docx.add_paragraph()
        paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
        paragraph.paragraph_format.space_after = Pt(0)
        paragraph.add_run(self.str_answer + ': ').bold = True
        paragraph.add_run(self.str_comittee_header + ' ')
        self.pcm_answer(paragraph)

    def pcm_analysis(self, docx):
        """Build the analysis list (committee act + extra analysis) and render it."""
        analysis_list = []
        analysis_list += [
            self.str_pcm[0].format(
                # pylint: disable=no-member
                self.commite_cm,
                self.commite_cm_date.day,
                num_to_month(self.commite_cm_date.month),
                self.commite_cm_date.year)
        ]
        analysis_list += self.extra_analysis
        add_analysis_paragraph(docx, analysis_list)

    def pcm_answer(self, paragraph):
        """Append the bolded advisor response plus the council decision text."""
        paragraph.add_run(
            # pylint: disable=no-member
            self.get_advisor_response_display().upper() + ' ').font.bold = True
        paragraph.add_run(self.str_cm[0].format(self.council_decision))

    def resource_analysis(self, docx):
        """Append the PCM answer to the document's last paragraph."""
        last_paragraph = docx.paragraphs[-1]
        self.pcm_answer(last_paragraph)

    def resource_pre_answer(self, docx):
        """Append the PCM answer to the document's last paragraph."""
        last_paragraph = docx.paragraphs[-1]
        self.pcm_answer(last_paragraph)

    def resource_answer(self, docx):
        """Append the CM answer to the document's last paragraph."""
        last_paragraph = docx.paragraphs[-1]
        self.cm_answer(last_paragraph)
class Instance(Document):
    """A Galaxy server instance with its config and geo-IP location.

    NOTE(review): Python 2 code (print statements) — kept as-is.
    """

    url = StringField(required=True, unique=True)
    creation_date = DateTimeField(required=True, default=datetime.now)
    update_date = DateTimeField()
    allow_user_creation = BooleanField()
    brand = StringField()
    enable_quotas = BooleanField()
    require_login = BooleanField()
    terms_url = StringField()
    version = StringField()
    city = StringField()
    zipcode = StringField()
    country = StringField()
    country_code = StringField()
    latitude = FloatField()
    longitude = FloatField()

    meta = {'collection': 'instances'}

    # Example ip-api.com response:
    # {
    #     "as":"AS2259 UNIVERSITE DE STRASBOURG",
    #     "city":"Strasbourg",
    #     "country":"France",
    #     "countryCode":"FR",
    #     "isp":"Universite De Strasbourg",
    #     "lat":48.6004,
    #     "lon":7.7874,
    #     "org":"Universite De Strasbourg",
    #     "query":"130.79.78.25",
    #     "region":"GES",
    #     "regionName":"Grand-Est",
    #     "status":"success",
    #     "timezone":"Europe/Paris",
    #     "zip":"67000"
    # }

    @classmethod
    def add_instance(cls, url):
        """Create or refresh an Instance from its Galaxy config and geo-IP data."""
        try:
            instance = Instance.objects.get(url=url)
        except Instance.DoesNotExist:
            instance = Instance(url=url)
        try:
            galaxy_instance = GalaxyInstance(url=url)
            instance_config = galaxy_instance.config.get_config()
            instance.update_date = datetime.now()
            instance.allow_user_creation = instance_config[
                'allow_user_creation']
            instance.brand = instance_config['brand']
            instance.enable_quotas = 'enable_quotas' in instance_config and instance_config[
                'enable_quotas']
            instance.require_login = instance_config['require_login']
            instance.terms_url = instance_config['terms_url']
            instance.version = instance_config['version_major']
            url_data = urlparse(url)
            # Geo-locate the host via ip-api.com; failures only skip location.
            try:
                instance_location = requests.get('http://ip-api.com/json/%s' % url_data.netloc)
            except requests.exceptions.ConnectionError:
                print "Unable to get location data for %s" % url_data.netloc
            else:
                try:
                    instance_location = instance_location.json()
                except ValueError:
                    print "Unable to decode location data for %s" % url_data.netloc
                else:
                    instance.city = instance_location['city']
                    instance.zipcode = instance_location['zip']
                    instance.country = instance_location['country']
                    instance.country_code = instance_location['countryCode']
                    instance.latitude = instance_location['lat']
                    instance.longitude = instance_location['lon']
            instance.save()
            Tool.retrieve_tools_from_instance(instance=instance)
        except ConnectionError:
            print "Unable to add or update %s" % url

    def get_tools_count(self):
        """Count distinct tool names (ignoring versions) on this instance."""
        tool_versions = ToolVersion.objects(instances=self)
        seen = set()
        # seen.add() returns None, so the filter keeps first occurrences only.
        unique_tool = [
            tool_version for tool_version in tool_versions
            if not (tool_version.name in seen or seen.add(tool_version.name))
        ]
        return len(unique_tool)

    @property
    def location(self):
        """'City, Country' when both are known, else 'Unknown'."""
        if self.city is not None and self.country is not None:
            return "%s, %s" % (self.city, self.country)
        else:
            return "Unknown"
class BaseModel(Document):
    """Abstract base document adding a creation timestamp to subclasses."""

    create_at = DateTimeField()

    meta = {'allow_inheritance': True, 'abstract': True}
class ActivityLog(Document):
    """Audit record of a user action.

    mongoDB will add a unique object '_id' for each 'Document'.
    """

    user_id = IntField(required=True)
    username = StringField(required=True, max_length=64)
    # Pass the callable, not its result: the original `datetime.utcnow()` was
    # evaluated once at class-definition time, stamping every log entry with
    # the module's import time instead of the save time.
    timestamp = DateTimeField(default=datetime.utcnow)
    details = StringField(required=True)
class Repository(Document):
    """
    Defines schema for a pulp repository in the `repos` collection.

    :ivar repo_id: unique across all repos
    :type repo_id: mongoengine.StringField
    :ivar display_name: user-readable name of the repository
    :type display_name: mongoengine.StringField
    :ivar description: free form text provided by the user to describe the repo
    :type description: mongoengine.StringField
    :ivar notes: arbitrary key-value pairs programmatically describing the repo;
                 these are intended as a way to describe the repo usage or
                 organizational purposes and should not vary depending on the
                 actual content of the repo
    :type notes: mongoengine.DictField
    :ivar content_unit_counts: key-value pairs of number of units associated
                               with this repo. This is different than the number
                               of associations, since a unit may be associated
                               multiple times.
    :type content_unit_counts: mongoengine.DictField
    :ivar scratchpad: Field used to persistently store arbitrary information
                      from the plugins across multiple operations.
    :type scratchpad: mongoengine.DictField
    :ivar last_unit_added: Datetime of the most recent occurrence of adding a
                           unit to the repo
    :type last_unit_added: mongoengine.DateTimeField
    :ivar last_unit_removed: Datetime of the most recent occurrence of removing
                             a unit from the repo
    :type last_unit_removed: mongoengine.DateTimeField
    :ivar _ns: (Deprecated) Namespace of repo, included for backwards
               compatibility.
    :type _ns: mongoengine.StringField
    """

    # Previously, this field was 'id'. This field is required to be unique, but
    # the previous index was '-id'. Setting unique=True here would generate a
    # new 'repo_id' index. Instead, we set the index in meta and enforce
    # uniqueness there.
    repo_id = StringField(required=True, regex=r'^[.\-_A-Za-z0-9]+$')
    display_name = StringField()
    description = StringField()
    notes = DictField()
    # Callable defaults so each document gets its own dict rather than a
    # single shared instance created at import time.
    scratchpad = DictField(default=dict)
    content_unit_counts = DictField(default=dict)
    last_unit_added = DateTimeField()
    last_unit_removed = DateTimeField()

    # For backward compatibility
    _ns = StringField(default='repos')

    meta = {
        'collection': 'repos',
        'allow_inheritance': False,
        'indexes': [{
            'fields': ['-repo_id'],
            'unique': True
        }],
        'queryset_class': RepoQuerySet
    }

    serializer = RepoSerializer

    def to_transfer_repo(self):
        """
        Converts the given database representation of a repository into a plugin
        repository transfer object, including any other fields that need to be
        included.

        Note: In the transfer unit, the repo_id is accessed with obj.id for
        backwards compatability.

        :return: transfer object used in many plugin API calls
        :rtype:  pulp.plugins.model.Repository}
        """
        r = plugin_repo(self.repo_id,
                        self.display_name,
                        self.description,
                        self.notes,
                        content_unit_counts=self.content_unit_counts,
                        last_unit_added=self.last_unit_added,
                        last_unit_removed=self.last_unit_removed,
                        repo_obj=self)
        return r

    def update_from_delta(self, repo_delta):
        """
        Update the repository's fields from a delta. Keys that are not fields
        will be ignored.

        :param repo_delta: key value pairs that represent the new values
        :type  repo_delta: dict
        """
        # Notes is done separately to only change notes fields that are
        # specified. If a notes field is set to None, remove it.
        if 'notes' in repo_delta:
            for key, value in repo_delta.pop('notes').items():
                if value is None:
                    self.notes.pop(key)
                else:
                    self.notes[key] = value

        # These keys may not be changed.
        prohibited = [
            'content_unit_counts', 'repo_id', 'last_unit_added',
            'last_unit_removed'
        ]
        # Plain loop instead of the original side-effect list comprehension.
        for key, value in repo_delta.items():
            if key not in prohibited:
                setattr(self, key, value)
class Exchange(Document):
    """A named exchange (scene/encounter) within a guild channel/engagement.

    Tracks participating character ids, opposition, archival state, and audit
    fields; renders itself to markdown-ish strings for chat display.
    """

    parent_id = StringField()
    name = StringField(required=True)
    guild = StringField(required=True)
    description = StringField()
    channel_id = StringField()
    engagement_id = StringField()
    type_name = StringField()
    characters = ListField(StringField())
    opposition = ListField(StringField())
    active_turn = StringField()
    started_on = DateTimeField()
    ended_on = DateTimeField()
    archived = BooleanField(default=False)
    history_id = StringField()
    created_by = StringField()
    created = DateTimeField(required=True)
    updated_by = StringField()
    updated = DateTimeField(required=True)

    @classmethod
    def post_save(cls, sender, document, **kwargs):
        """Signal handler: sync the updating user's history and write a Log entry."""
        if document.history_id:
            user = User().get_by_id(document.updated_by)
            user.history_id = document.history_id
            user.updated_by = document.updated_by
            user.updated = document.updated
            user.save()
            print({'history_id': document.history_id})
        else:
            changes = document._delta()[0]
            # Derive the action name from the save kwargs and changed fields.
            action = 'updated'
            if 'created' in kwargs:
                action = 'created' if kwargs['created'] else action
            if action == 'updated' and 'archived' in changes:
                action = 'archived' if changes['archived'] else 'restored'
            Log().create_new(str(document.id), document.name,
                             document.updated_by, document.guild, 'Exchange',
                             changes, action)
            user = User().get_by_id(document.updated_by)
            if user.history_id:
                user.history_id = None
                user.updated_by = document.updated_by
                user.updated = document.updated
                user.save()
            print(changes)

    @staticmethod
    def query():
        return Exchange.objects

    @staticmethod
    def filter(**params):
        return Exchange.objects.filter(**params)

    def create_new(self, user, guild, channel_id, engagement_id, name,
                   archived):
        """Populate, stamp audit fields, save and return self.

        NOTE(review): the ``archived`` parameter is accepted but never used.
        """
        self.name = name
        self.guild = guild
        self.parent_id = engagement_id
        self.channel_id = channel_id
        self.engagement_id = engagement_id
        self.created_by = str(user.id)
        self.created = T.now()
        self.updated_by = str(user.id)
        self.updated = T.now()
        self.save()
        return self

    def find(self, guild, channel_id, engagement_id, name, archived=False):
        """Find the first exchange matching the filters (name is icontains)."""
        filter = Exchange.objects(guild=guild,
                                  channel_id=channel_id,
                                  engagement_id=engagement_id,
                                  name__icontains=name,
                                  archived=archived)
        exchange = filter.first()
        return exchange

    def get_or_create(self, user, guild, channel, engagement, name,
                      archived=False):
        """Find an exchange, creating it (plus its backing character) if absent."""
        exchange = self.find(guild, str(channel.id), str(engagement.id), name,
                             archived)
        if exchange is None:
            exchange = self.create_new(user, guild, str(channel.id),
                                       str(engagement.id), name, archived)
            exchange.character = Character().get_or_create(
                user, name, guild, exchange, 'Exchange', archived)
            exchange.save()
        return exchange

    def get_by_id(self, id):
        exchange = Exchange.objects(id=id).first()
        return exchange

    @classmethod
    def get_by_channel(cls, channel, archived=False, page_num=1, page_size=5):
        """Page exchanges in a channel; page_num=None/0 returns all, sorted."""
        if page_num:
            offset = (page_num - 1) * page_size
            items = cls.filter(
                channel_id=str(channel.id),
                archived=archived).skip(offset).limit(page_size).all()
        else:
            items = cls.filter(channel_id=str(channel.id),
                               archived=archived).order_by('name',
                                                           'created').all()
        return items

    @classmethod
    def get_by_engagement(cls, engagement, archived=False, page_num=1,
                          page_size=5):
        """Page exchanges in an engagement; page_num=None/0 returns all, sorted."""
        if page_num:
            offset = (page_num - 1) * page_size
            items = cls.filter(
                engagement_id=str(engagement.id),
                archived=archived).skip(offset).limit(page_size).all()
        else:
            items = cls.filter(engagement_id=str(engagement.id),
                               archived=archived).order_by('name',
                                                           'created').all()
        return items

    @classmethod
    def get_by_page(cls, params, page_num=1, page_size=5):
        """Page arbitrary-filter results sorted by name then created."""
        if page_num:
            offset = (page_num - 1) * page_size
            logs = cls.filter(**params).order_by(
                'name', 'created').skip(offset).limit(page_size).all()
        else:
            logs = cls.filter(**params).order_by('name', 'created').all()
        return logs

    @classmethod
    def get_by_parent(cls, **params):
        # NOTE(review): wraps the queryset itself in a single-element list
        # rather than listing its documents — callers iterating the result get
        # the queryset, not Exchange instances; confirm intended.
        items = cls.filter(**params).all()
        return [items] if items else []

    def archive(self, user):
        """Archive this exchange and, recursively, its children."""
        self.reverse_archive(user)
        self.archived = True
        self.updated_by = str(user.id)
        self.updated = T.now()
        self.save()

    def reverse_archive(self, user):
        """Recursively archive everything whose parent_id is this exchange."""
        for z in Exchange().get_by_parent(parent_id=str(self.id)):
            z.reverse_archive(user)
            z.archived = True
            z.updated_by = str(user.id)
            z.updated = T.now()
            z.save()

    def restore(self, user):
        """Un-archive this exchange and, recursively, its children."""
        self.reverse_restore(user)
        self.archived = False
        self.updated_by = str(user.id)
        self.updated = T.now()
        self.save()

    def reverse_restore(self, user):
        """Recursively restore everything whose parent_id is this exchange."""
        for z in Exchange().get_by_parent(parent_id=str(self.id)):
            z.reverse_restore(user)
            z.archived = False
            z.updated_by = str(user.id)
            z.updated = T.now()
            z.save()

    def get_string_characters(self, channel=None):
        """Long-form character list; missing ids are silently skipped."""
        characters = [Character.get_by_id(id) for id in self.characters]
        characters = '***\n        ***'.join(c.name for c in characters if c)
        return f'\n        _Characters:_\n        ***{characters}***'

    def get_short_string_characters(self, channel=None):
        """Short comma-separated character list."""
        characters = [Character.get_by_id(id) for id in self.characters]
        characters = ', '.join(c.name for c in characters if c)
        return f'\n        _({characters})_'

    def get_string_opposition(self, channel=None):
        """Long-form opposition list; missing ids are silently skipped."""
        opposition = [Character.get_by_id(id) for id in self.opposition]
        opposition = '***\n        ***'.join(c.name for c in opposition if c)
        return f'\n        _Opposition:_\n        ***{opposition}***'

    def get_short_string_opposition(self, channel=None):
        """Short comma-separated opposition list."""
        opposition = [Character.get_by_id(id) for id in self.opposition]
        opposition = ', '.join(c.name for c in opposition if c)
        return f'\n        _({opposition})_'

    def get_string(self, channel, user=None):
        """Full display string: name, active marker, dates, lists, character info."""
        name = f'***{self.name}***'
        active = ''
        if channel:
            active = ' _(Active Exchange)_ ' if str(
                self.id) == channel.active_exchange else ''
        start = ''
        if self.started_on:
            start = f'\n_Started On:_ ***{T.to(self.started_on, user)}***' if self.started_on else ''
        end = ''
        if self.ended_on:
            end = f'\n_Ended On:_ ***{T.to(self.ended_on, user)}***' if self.ended_on else ''
        description = f' - "{self.description}"' if self.description else ''
        characters = f'\n\n{self.get_string_characters()}' if self.characters else ''
        opposition = f'\n\n{self.get_string_opposition()}' if self.opposition else ''
        aspects = ''
        stress = ''
        # self.character is a transient attribute set in get_or_create, not a
        # persisted field.
        if self.character:
            name = f'***{self.character.name}***' if self.character.name else name
            description = f' - "{self.character.description}"' if self.character.description else description
            aspects = self.character.get_string_aspects()
            stress = self.character.get_string_stress(
            ) if self.character.has_stress else ''
        return f'        {name}{active}{start}{end}{description}{characters}{opposition}{aspects}{stress}'

    def get_short_string(self, channel=None):
        """Compact display string for listings."""
        name = f'***{self.name}***'
        active = ''
        if channel:
            active = f' _(Active {str(self.type_name).title()})_ ' if str(
                self.id
            ) == channel.active_exchange else f' _({str(self.type_name).title()})_ '
        characters = f'\n{self.get_short_string_characters()}' if self.characters else ''
        opposition = f' v. {self.get_short_string_opposition()}' if self.opposition else ''
        description = f' - "{self.description}"' if self.description else ''
        if self.character:
            name = f'***{self.character.name}***' if self.character.name else name
            description = f' - "{self.character.description}"' if self.character.description else ''
        return f'        {name}{active}{description}{characters}{opposition}'
class Request(MongoModel, Document):
    """Mongo persistence model mirroring ``brewtils.models.Request``.

    Oversized ``parameters``/``output`` payloads are transparently spilled to
    GridFS around ``save`` and pulled back in ``pre_serialize``.
    """

    brewtils_model = brewtils.models.Request

    # These fields are duplicated for job types, changes to this field
    # necessitate a change to the RequestTemplateSchema in brewtils.
    TEMPLATE_FIELDS = {
        "system": {"field": StringField, "kwargs": {"required": True}},
        "system_version": {"field": StringField, "kwargs": {"required": True}},
        "instance_name": {"field": StringField, "kwargs": {"required": True}},
        "namespace": {"field": StringField, "kwargs": {"required": False}},
        "command": {"field": StringField, "kwargs": {"required": True}},
        "command_type": {"field": StringField, "kwargs": {}},
        "parameters": {"field": DictField, "kwargs": {}},
        "comment": {"field": StringField, "kwargs": {"required": False}},
        "metadata": {"field": DictField, "kwargs": {}},
        "output_type": {"field": StringField, "kwargs": {}},
    }

    # During class-body execution locals() is the class namespace (CPython),
    # so this declares one mongoengine field per TEMPLATE_FIELDS entry.
    for field_name, field_info in TEMPLATE_FIELDS.items():
        locals()[field_name] = field_info["field"](**field_info["kwargs"])

    # Shared field with RequestTemplate, but it is required when saving Request
    namespace = StringField(required=True)

    parent = ReferenceField("Request",
                            dbref=True,
                            required=False,
                            reverse_delete_rule=CASCADE)
    children = DummyField(required=False)
    output = StringField()
    output_gridfs = FileField()
    output_type = StringField(choices=BrewtilsCommand.OUTPUT_TYPES)
    status = StringField(choices=BrewtilsRequest.STATUS_LIST, default="CREATED")
    command_type = StringField(choices=BrewtilsCommand.COMMAND_TYPES)
    created_at = DateTimeField(default=datetime.datetime.utcnow, required=True)
    updated_at = DateTimeField(default=None, required=True)
    status_updated_at = DateTimeField()
    error_class = StringField(required=False)
    has_parent = BooleanField(required=False)
    hidden = BooleanField(required=False)
    requester = StringField(required=False)
    parameters_gridfs = FileField()

    meta = {
        "queryset_class": FileFieldHandlingQuerySet,
        "auto_create_index": False,  # We need to manage this ourselves
        "index_background": True,
        "indexes": [
            # These are used for sorting all requests
            {"name": "command_index", "fields": ["command"]},
            {"name": "command_type_index", "fields": ["command_type"]},
            {"name": "system_index", "fields": ["system"]},
            {"name": "instance_name_index", "fields": ["instance_name"]},
            {"name": "namespace_index", "fields": ["namespace"]},
            {"name": "status_index", "fields": ["status"]},
            {"name": "created_at_index", "fields": ["created_at"]},
            {"name": "updated_at_index", "fields": ["updated_at"]},
            {"name": "status_updated_at_index", "fields": ["status_updated_at"]},
            {"name": "comment_index", "fields": ["comment"]},
            {"name": "parent_ref_index", "fields": ["parent"]},
            {"name": "parent_index", "fields": ["has_parent"]},
            # These are for sorting parent requests
            {"name": "parent_command_index", "fields": ["has_parent", "command"]},
            {"name": "parent_system_index", "fields": ["has_parent", "system"]},
            {
                "name": "parent_instance_name_index",
                "fields": ["has_parent", "instance_name"],
            },
            {"name": "parent_status_index", "fields": ["has_parent", "status"]},
            {"name": "parent_created_at_index", "fields": ["has_parent", "created_at"]},
            {"name": "parent_comment_index", "fields": ["has_parent", "comment"]},
            # These are used for filtering all requests while sorting on created time
            {"name": "created_at_command_index", "fields": ["-created_at", "command"]},
            {"name": "created_at_system_index", "fields": ["-created_at", "system"]},
            {
                "name": "created_at_instance_name_index",
                "fields": ["-created_at", "instance_name"],
            },
            {"name": "created_at_status_index", "fields": ["-created_at", "status"]},
            # These are used for filtering parent while sorting on created time
            {
                "name": "parent_created_at_command_index",
                "fields": ["has_parent", "-created_at", "command"],
            },
            {
                "name": "parent_created_at_system_index",
                "fields": ["has_parent", "-created_at", "system"],
            },
            {
                "name": "parent_created_at_instance_name_index",
                "fields": ["has_parent", "-created_at", "instance_name"],
            },
            {
                "name": "parent_created_at_status_index",
                "fields": ["has_parent", "-created_at", "status"],
            },
            # These are used for filtering hidden while sorting on created time
            # I THINK this makes the set of indexes above superfluous, but I'm keeping
            # both as a safety measure
            {
                "name": "hidden_parent_created_at_command_index",
                "fields": ["hidden", "has_parent", "-created_at", "command"],
            },
            {
                "name": "hidden_parent_created_at_system_index",
                "fields": ["hidden", "has_parent", "-created_at", "system"],
            },
            {
                "name": "hidden_parent_created_at_instance_name_index",
                "fields": ["hidden", "has_parent", "-created_at", "instance_name"],
            },
            {
                "name": "hidden_parent_created_at_status_index",
                "fields": ["hidden", "has_parent", "-created_at", "status"],
            },
            # This is used for text searching
            {
                "name": "text_index",
                "fields": [
                    "$system",
                    "$command",
                    "$command_type",
                    "$comment",
                    "$status",
                    "$instance_name",
                ],
            },
        ],
    }

    logger = logging.getLogger(__name__)

    def pre_serialize(self):
        """Pull any fields out of GridFS"""
        encoding = "utf-8"

        if self.output_gridfs:
            self.logger.debug("Retrieving output from GridFS")
            self.output = self.output_gridfs.read().decode(encoding)
            self.output_gridfs = None

        if self.parameters_gridfs:
            self.logger.debug("Retrieving parameters from GridFS")
            self.parameters = json.loads(
                self.parameters_gridfs.read().decode(encoding))
            self.parameters_gridfs = None

    def _pre_save(self):
        """Move request attributes to GridFS if too big"""
        self.updated_at = datetime.datetime.utcnow()
        encoding = "utf-8"

        # NOTE: The following was added for #1216, which aims to resolve the
        # duplication and orphaning of files in gridfs. It is less than ideal
        # to do an additional database lookup, but the various conversions to
        # and from brewtils mean that we get here having lost the
        # parameters_gridfs and output_gridfs values, preventing us from
        # checking if they've already been populated. Rather than perform a
        # potentially dangerous rework of the entire Request update flow, we
        # opt to just pull the Request as it exists in the database so that we
        # can check those gridfs field.
        if self.id:
            try:
                old_request = Request.objects.get(id=self.id)
                self.parameters_gridfs = old_request.parameters_gridfs
                self.output_gridfs = old_request.output_gridfs
            except self.DoesNotExist:
                # Requests to child gardens have an id set from the parent, but
                # no local Request yet
                pass

        if self.parameters and self.parameters_gridfs.grid_id is None:
            params_json = json.dumps(self.parameters)
            if len(params_json) > REQUEST_MAX_PARAM_SIZE:
                self.logger.debug("Parameters too big, storing in GridFS")
                self.parameters_gridfs.put(params_json, encoding=encoding)

        if self.parameters_gridfs.grid_id:
            # Null out the inline copy once it lives in GridFS.
            self.parameters = None

        if self.output and self.output_gridfs.grid_id is None:
            output_json = json.dumps(self.output)
            if len(output_json) > REQUEST_MAX_PARAM_SIZE:
                self.logger.info("Output size too big, storing in gridfs")
                self.output_gridfs.put(self.output, encoding=encoding)

        if self.output_gridfs.grid_id:
            self.output = None

    def _post_save(self):
        # Only link raw files for freshly-created local (this garden) requests.
        if self.status == "CREATED" and self.namespace == config.get(
                "garden.name"):
            self._update_raw_file_references()

    def _update_raw_file_references(self):
        """Point RawFile documents referenced by bytes parameters back at us."""
        parameters = self.parameters or {}
        for param_value in parameters.values():
            if (isinstance(param_value, dict)
                    and param_value.get("type") == "bytes"
                    and param_value.get("id") is not None):
                try:
                    raw_file = RawFile.objects.get(id=param_value["id"])
                    raw_file.request = self
                    raw_file.save()
                except RawFile.DoesNotExist:
                    # f-prefix added to the second literal: the original plain
                    # string logged the text "{self.id}" without interpolating.
                    self.logger.debug(
                        f"Error locating RawFile with id {param_value['id']} "
                        f"while saving Request {self.id}")

    def save(self, *args, **kwargs):
        """Save with the GridFS spill/link hooks wrapped around the base save."""
        self._pre_save()
        super(Request, self).save(*args, **kwargs)
        self._post_save()

    def clean(self):
        """Validate before saving to the database"""
        if self.status not in BrewtilsRequest.STATUS_LIST:
            raise ModelValidationError(
                f"Can not save Request {self}: Invalid status '{self.status}'")

        if (self.command_type is not None
                and self.command_type not in BrewtilsRequest.COMMAND_TYPES):
            raise ModelValidationError(
                f"Can not save Request {self}: Invalid command type"
                f" '{self.command_type}'")

        if (self.output_type is not None
                and self.output_type not in BrewtilsRequest.OUTPUT_TYPES):
            raise ModelValidationError(
                f"Can not save Request {self}: Invalid output type '{self.output_type}'"
            )

        # Deal with has_parent
        if self.has_parent is None:
            self.has_parent = bool(self.parent)
        elif self.has_parent != bool(self.parent):
            raise ModelValidationError(
                f"Cannot save Request {self}: parent value of {self.parent!r} is not "
                f"consistent with has_parent value of {self.has_parent}")

        # Track when status last changed, but only for local requests.
        if (self.namespace == config.get("garden.name")) and (
                "status" in self.changed_fields or self.created):
            self.status_updated_at = datetime.datetime.utcnow()

    def clean_update(self):
        """Ensure that the update would not result in an illegal status transition"""
        # Get the original status
        old_status = Request.objects.get(id=self.id).status

        if self.status != old_status:
            if old_status in BrewtilsRequest.COMPLETED_STATUSES:
                raise RequestStatusTransitionError(
                    "Status for a request cannot be updated once it has been "
                    f"completed. Current: {old_status}, Requested: {self.status}"
                )

            if (old_status == "IN_PROGRESS" and self.status
                    not in BrewtilsRequest.COMPLETED_STATUSES):
                raise RequestStatusTransitionError(
                    "Request status can only transition from IN_PROGRESS to a "
                    f"completed status. Requested: {self.status}, completed statuses "
                    f"are {BrewtilsRequest.COMPLETED_STATUSES}.")
class Url(Document):
    """Mapping between a long URL and its shortened code/URL."""

    longUrl = StringField(required=True)
    shortCode = StringField(required=True, unique=True)
    shortUrl = StringField(required=True)
    # BUGFIX: pass the callable, not datetime.now() — calling it here evaluates
    # once at import time, freezing the same timestamp into every document.
    createdAt = DateTimeField(default=datetime.now)
class Article(Document):
    """A single piece of article content.

    May be a Seeking Alpha column, a Twitter post, a Google News search
    result, etc.
    """

    # Each collector's processing code chooses its own hash algorithm; the
    # only requirement is that values never collide globally.
    uuid = StringField(max_length=32, primary_key=True)
    # Title of the article, or the text of the post, etc.
    title = StringField(required=True)
    title_ana = EmbeddedDocumentField(AzureTextAna)
    # News abstract, search snippet, or short Twitter text
    abstract = StringField()
    abstract_ana = EmbeddedDocumentField(AzureTextAna)
    # URL where the full text can be accessed
    full_text_url = StringField()
    # If the source is Twitter there may be an associated image
    related_image_url = StringField()
    # Some sites (e.g. Seeking Alpha) have ratings; the value is recorded here
    rating = IntField()
    # Change in the rating
    rating_change = IntField()
    # Predicted return for the current year
    pred_ret_this_yr = FloatField()
    # Predicted PE for the current year
    pred_pe_this_yr = FloatField()
    # Predicted return for the next year
    pred_ret_next_yr = FloatField()
    # Predicted PE for the next year
    pred_pe_next_yr = FloatField()
    publish_time = DateTimeField()
    related_symbols = ListField(LazyReferenceField(FinancialInstrumentSymbol), default=None)
    # Where the data came from: SeekingAlpha / GoogleNews / Twitter
    engine_site = StringField()
    # Which channel of the site the article belongs to (may be "Search")
    channel_in_site = StringField()
    batch_action_uuid = StringField()
    action_uuid = StringField()
    # If the article came from a search result, one of the search phrases used
    from_searching_phase = LazyReferenceField(SearchingPhrase)
    # Author info, when the article comes from Seeking Alpha
    seeking_alpha_author = LazyReferenceField(AuthorInSeekingAlpha)
    # Extra data (comments etc.), when the article comes from Seeking Alpha
    seeking_alpha_extra = EmbeddedDocumentField(SeekingAlphaArticleExtra)
    # Poster info, when the article comes from Twitter
    twitter_poster = LazyReferenceField(UserInTwitter)
    # Tweet comment / like / follow counts
    tweet_extra = EmbeddedDocumentField(TweetExtra)

    meta = {
        "indexes": [
            "$title",  # text index
            "#batch_action_uuid",
            "#action_uuid",
            "-publish_time",  # descending
            "-rating",  # descending
            "#engine_site",  # hashed index: quickly select between sources
            "#channel_in_site",  # hashed index: quickly filter by channel
        ]
    }
class User(Document):
    """Account document storing login credentials."""

    email = StringField(required=True)
    password = StringField(max_length=200, required=True)
    # Callable default: evaluated per-document at creation, not at import time
    creation_date = DateTimeField(default=datetime.datetime.utcnow)
    objects = QuerySetManager()