class Report(db.Document):
    REPORT_STATUS_CODE_OK = 0
    REPORT_STATUS_CODE_FAILURE = 1
    REPORT_STATUS_CODES = (
        (REPORT_STATUS_CODE_OK, 'OK'),
        (REPORT_STATUS_CODE_FAILURE, 'FAIL'),
    )

    analysis_system = ReferenceField(AnalysisSystem, required=True)
    sample = ReferenceField(Sample, required=True)
    analysis_date = DateTimeField()
    upload_date = DateTimeField(default=TimeFunctions.get_timestamp, required=True)
    status = IntField(choices=REPORT_STATUS_CODES, default=REPORT_STATUS_CODE_OK, required=True)
    error_message = StringField(null=True, required=False)
    tags = ListField(StringField())
    additional_metadata = DictField()
    json_report_objects = MapField(field=FileField())
    raw_report_objects = MapField(field=FileField())

    meta = {
        'ordering': ['-upload_date'],
        'indexes': ['upload_date']
    }

    def __repr__(self):
        return '[Report] {} on {}'.format(self.sample.id, self.analysis_system.identifier_name)

    def __str__(self):
        return self.__repr__()

    def _add_report_object(self, file, target):
        proxy = GridFSProxy()
        proxy.put(file)
        target[file.name] = proxy

    def add_json_report_object(self, file):
        self._add_report_object(file, self.json_report_objects)

    def add_raw_report_object(self, file):
        self._add_report_object(file, self.raw_report_objects)
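# Usage sketch (assumes existing `system` / `sample` documents). The map is
# keyed by `file.name`, and MongoDB map keys cannot contain dots, so the
# illustrative name below carries no extension:
import io

report = Report(analysis_system=system, sample=sample)
payload = io.BytesIO(b'{"verdict": "ok"}')
payload.name = 'scan_report'
report.add_json_report_object(payload)  # stored in GridFS via GridFSProxy
report.save()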
class CrimeStatistics(EmbeddedDocument):
    offence_group = StringField(required=True, max_length=100)
    offence_type = StringField(max_length=100)
    data_of_year = ListField(EmbeddedDocumentField(StatisticsOfYear), required=True)
    other_info = MapField(StringField(max_length=20))

    def __init__(self, offence_group, data_of_year, offence_type='',
                 other_info=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.offence_group = offence_group
        self.offence_type = offence_type
        self.data_of_year = data_of_year
        # Avoid a mutable default argument: a shared `{}` default would be
        # reused across instances.
        self.other_info = other_info if other_info is not None else {}
class Lga(Document):
    title = StringField(required=True)
    subtitle = StringField(required=True)
    district = StringField(required=True, primary_key=True)
    offencegroups = MapField(EmbeddedDocumentField(OffenceGroup))

    def __init__(self, title, subtitle, district, offencegroups, *args, **values):
        super().__init__(*args, **values)
        self.title = title
        self.subtitle = subtitle
        self.district = district
        self.offencegroups = offencegroups
class Region(Document):
    """Region information."""
    name = StringField(max_length=100, required=True)
    # DCMMC: TODO: this field is not yet present in the model's JSON...
    id_in_map = StringField(max_length=100, required=True, unique=True)
    floor = ReferenceField('Floor', required=True)  # was `require=True`, a silent no-op
    # Parking spots: a dict of {parkingNo: LazyReferenceField}
    parkings = MapField(field=LazyReferenceField('Parking'))
    addition_info = StringField(max_length=10000, default='')
class Translation(Document):
    meta = {
        "collection": "translations",
    }

    # Text in default locale
    text = StringField(unique=True)
    # Is a name and should not be translated
    is_name = BooleanField(db_field="isName")
    # Translations keyed with locale
    translations = MapField(StringField())
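# Usage sketch: the `translations` map keys a locale code to the translated
# string (locale codes and values are illustrative):
greeting = Translation(text="Hello",
                       translations={"de": "Hallo", "fr": "Bonjour"})
greeting.save()
Translation.objects(text="Hello").first().translations.get("fr")  # 'Bonjour'
# Names are flagged so clients know to skip translation:
Translation(text="MongoDB", is_name=True).save()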
class OffenceType(EmbeddedDocument):
    offence_type = StringField()
    years = MapField(EmbeddedDocumentField(Year))
    _24_month_trend = StringField()
    _60_month_trend = StringField()
    LGA_ranking = StringField()

    def __init__(self, offence_type, years, _24_month_trend, _60_month_trend,
                 LGA_ranking, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.offence_type = offence_type
        self.years = years
        self._24_month_trend = _24_month_trend
        self._60_month_trend = _60_month_trend
        self.LGA_ranking = LGA_ranking
class Floor(Document):
    """Floor information."""
    name = StringField(max_length=100, required=True)
    # Not used as the primary key: with multiple parking lots the ids
    # could collide.
    id_in_map = StringField(max_length=100, required=True, unique=True)
    # Which floor?
    floor_num = IntField(required=True)
    # A floor may contain several regions.
    # Lazy references avoid the dereferencing overhead when only the pk is
    # needed; accessing an entry returns a LazyReference instance, so call
    # fetch() manually to dereference it.
    # A dict of {id_in_map: LazyReferenceField}.
    regions = MapField(field=LazyReferenceField('Region'))
    addition_info = StringField(max_length=10000, default='')
class Profile(BDocument):
    id = SequenceField(primary_key=True)
    name = StringField(required=True, unique=True)
    is_root = BooleanField(default=False)
    default_state = StringField()
    permissions = MapField(EmbeddedDocumentField(Resource))

    def is_allowed(self, resource, action):
        if self.is_root:
            return True
        if resource in self.permissions and not self.permissions[resource].is_all:
            return action in self.permissions[resource].actions
        return resource in self.permissions
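# Usage sketch for the permission map (Resource is the EmbeddedDocument
# defined further below; resource/action names here are made up):
profile = Profile(name='editor', permissions={
    'articles': Resource(name='articles',
                         actions={'read': 'allow', 'write': 'allow'}),
    'billing': Resource(name='billing', is_all=True),
})
profile.is_allowed('articles', 'write')    # True: 'write' is a key in actions
profile.is_allowed('articles', 'delete')   # False
profile.is_allowed('billing', 'anything')  # True: is_all grants every action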
class Meme(Document):
    meta = {"collection": "memes", "allow_inheritance": False}

    id = EmbeddedDocumentField(MemeId, db_field="_id", required=True, primary_key=True)
    postings = MapField(field=IntField(), required=True)
    name = StringField(required=True)
    title = StringField(required=True)
    caption = StringField(required=True)
    length = IntField(required=True, min_value=1)
    score = IntField(required=True)
    url = URLField(required=True)
    image = URLField(required=True)

    def __repr__(self):
        return self.to_json()
class plant_types(Document):
    '''
    Document of plants catalog:
    name = common name of the plant
    species = scientific name of the plant
    tags = a Map of plant_tag name : list of tag value string
    description = String containing description of plant
    //TODO: plant_image = image of each plant
    //TODO: guides = link to guides that might be useful
    '''
    name = StringField()
    species = StringField()
    tags = MapField(ListField(StringField()))
    description = StringField()
    days_to_water = IntField()
    watering_description = StringField()
    image = StringField()

    def to_dict(self):
        return {
            "_id": str(self.id),
            "name": self.name,
            "species": self.species,
            "tags": self.tags,
            "description": self.description,
            "days_to_water": self.days_to_water,
            "watering_description": self.watering_description
        }

    def to_dict_image(self):
        # Same as to_dict, plus the image payload
        return {**self.to_dict(), "image": self.image}
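# Usage sketch: `tags` maps a tag name to a list of string values (the tag
# names and values below are illustrative):
basil = plant_types(
    name='Basil',
    species='Ocimum basilicum',
    tags={'sunlight': ['full', 'partial'], 'soil': ['well-drained']},
    days_to_water=3,
)
basil.save()
basil.to_dict()  # serializes everything except `image`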
class EventDescriptor(DynamicDocument):
    """Describes the objects in the data property of Event documents

    Attributes
    ----------
    run_start : str
        Globally unique ID to the run_start document this descriptor
        is associated with.
    uid : str
        Globally unique ID for this event descriptor.
    time : float
        Creation time of the document as unix epoch time.
    data_keys : mongoengine.DynamicEmbeddedDocument
        Describes the objects in the data property of Event documents
    """
    run_start = ReferenceField(RunStart, reverse_delete_rule=DENY,
                               required=True, db_field='run_start_id')
    uid = StringField(required=True, unique=True)
    time = FloatField(required=True)
    data_keys = MapField(EmbeddedDocumentField(DataKey), required=True)

    meta = {'indexes': ['-run_start', '-time', '-uid'], 'db_alias': ALIAS}
class TaxAnalysis(Document):
    """A dictionary containing results from a taxonomic analysis.

    Some Runs are analyzed and the number of reads that align to different
    taxa are recorded. The taxonomic analysis is stored in the SRA as a
    hierarchy, but it is stored here as a flattened dictionary for easier
    access to the different classes.

    Basic structure is:
        'nspot_analyze': The number of spots analyzed,
        'total_spots': The total number of spots,
        'mapped_spots': The number of spots that could be mapped,
        'tax_counts': A dictionary containing the actual taxonomic counts,
            organized by level in the tree of life
            'kingdom': ...
            'species':
                'parent': Name of the parent level.
                'total_count': Number of mapped spots at this level and below.
                'self_count': Number of mapped spots at this level.
                'tax_id': Taxonomic identifier.
                'name': Name of this taxonomy.
            'subspecies': ...
    """
    srr = StringField()
    nspot_analyze = IntField()
    total_spots = IntField()
    mapped_spots = IntField()
    tax_counts = MapField(
        ListField(EmbeddedDocumentField(TaxRecord), default=list))
class HoraLocalizacao(EmbeddedDocument):
    value = StringField()
    localizacoes = MapField(EmbeddedDocumentField(Localizacao))

    def __str__(self):
        return 'Hora {}'.format(self.value)
class Person(Document):
    wallet = EmbeddedDocumentField(Wallet)
    wallet_map = MapField(EmbeddedDocumentField(Wallet))
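# Usage sketch, assuming Wallet is an EmbeddedDocument with (say) a
# `balance` field: `wallet` holds one embedded document, while `wallet_map`
# keys arbitrary strings to one Wallet each.
person = Person(
    wallet=Wallet(balance=10.0),
    wallet_map={'savings': Wallet(balance=250.0),
                'travel': Wallet(balance=40.0)},
)
person.save()
person.wallet_map['savings'].balance  # 250.0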
class DoubleAnnotatorTask(BaseTask):
    TASK_TYPE = "Double Annotators"
    reference = ReferenceField('Annotator', required=True)
    target = ReferenceField('Annotator', required=True)
    # fully annotated textgrid from the ref annotator
    ref_tg = ReferenceField(BaseTextGridDocument)
    # fully annotated textgrid from the target annotator
    target_tg = ReferenceField(BaseTextGridDocument)
    # target and ref annotated tg's stacked one onto another, for annotation
    # merging
    merged_tg: MergedAnnotsTextGrid = ReferenceField(MergedAnnotsTextGrid)
    times_conflicts = EmbeddedDocumentField(MergeResults)
    # target and ref still stacked, but all annotations are the same
    merged_annots_tg: MergedAnnotsTextGrid = ReferenceField(MergedAnnotsTextGrid)
    # times that could be merged automatically are merged, annotators have to
    # agree on frontiers that are too "far away" from each other
    merged_times_tg: MergedTimesTextGrid = ReferenceField(MergedTimesTextGrid)
    # gamma values for each tier
    tiers_gamma: Dict[str, float] = MapField(FloatField())

    class Steps(Enum):
        PENDING = 0
        PARALLEL = 1
        TIERS_AGREEMENT = 2
        MERGING_ANNOTS = 3
        MERGING_TIMES = 4
        DONE = 5

    steps_names = {
        Steps.PENDING: "Pending",
        Steps.PARALLEL: "Parallel Annotations",
        Steps.TIERS_AGREEMENT: "Agreement on tiers",
        Steps.MERGING_ANNOTS: "Merging annotations",
        Steps.MERGING_TIMES: "Merging Times",
        Steps.DONE: "Done"
    }

    INITIAL_TEMPLATE_INSTRUCTIONS = \
        """Annotate the file using the protocol defined by your annotation manager"""

    WAIT_FOR_OTHER_ANNOTATOR_INSTRUCTIONS = \
        """Wait for the %s annotator to finish her job. Meanwhile, if you think
        your work is worth improving, you can still upload new versions of
        your annotated file."""

    CANT_MAKE_MERGED = \
        """Your TextGrid can't be merged for now because some tiers mismatch.
        Please make sure that the tiers in both your annotated file and your
        partner's are the same."""

    REF_MERGE_ANNOTS_INSTRUCTIONS = \
        """The 'Target' annotator should join you at this point. Your job is
        to find some agreement on the number of annotations and their content.
        Your goal is to have, for each pair of matching reference and target
        tiers, the exact same number of annotations with the same values.
        You don't have to agree on the timing of those annotations yet."""

    TARGET_MERGE_ANNOTS_INSTRUCTIONS = \
        """Join the reference annotator now, so you can (together) find an
        agreement based on your respective annotations. The merged file is to
        be edited on the reference annotator's computer."""

    REF_MERGE_TIMES_INSTRUCTIONS = \
        """Together with the Target annotator, make sure the Frontiers that
        still couldn't be merged are closer to one another (a difference
        smaller than %ims). The Frontiers that had too big of a mismatch are
        listed underneath.""" \
        % int(MergedAnnotsTextGrid.DIFF_THRESHOLD * 1000)

    TARGET_MERGE_TIMES_INSTRUCTIONS = \
        """Still with the Reference annotator, finish merging the Frontiers'
        timing mismatches of the merged file. The merging has to be done on
        her computer. As a way to help her, the Frontiers that had too big of
        a mismatch are listed underneath."""

    @property
    def current_step(self) -> Steps:
        if not self.has_started:
            return self.Steps.PENDING
        if self.is_done:
            return self.Steps.DONE
        if self.merged_annots_tg is not None:
            return self.Steps.MERGING_TIMES
        if self.merged_tg is not None:
            return self.Steps.MERGING_ANNOTS
        if self.target_tg is not None and self.ref_tg is not None:
            return self.Steps.TIERS_AGREEMENT
        return self.Steps.PARALLEL

    @property
    def annotators(self):
        return [self.reference, self.target]

    def notify_merged_ready(self, annotator: 'Annotator'):
        notif_dispatch(
            message=(f"The other annotator has finished their job on the"
                     f" double-annotation task for file {self.data_file}"),
            notif_type="finished",
            object_type="task",
            object_id=str(self.id),
            users=[annotator])

    def current_instructions(self, user: 'Annotator') -> str:
        if user == self.reference:
            if self.ref_tg is None:
                return self.INITIAL_TEMPLATE_INSTRUCTIONS
            elif self.ref_tg is not None and self.target_tg is None:
                return self.WAIT_FOR_OTHER_ANNOTATOR_INSTRUCTIONS % "target"
            # the second operand was a duplicated `self.ref_tg is not None`
            elif self.ref_tg is not None and self.target_tg is not None and self.merged_tg is None:
                return self.CANT_MAKE_MERGED
            elif self.merged_annots_tg is None:
                return self.REF_MERGE_ANNOTS_INSTRUCTIONS
            else:
                return self.REF_MERGE_TIMES_INSTRUCTIONS
        else:  # it's the target annotator
            if self.target_tg is None:
                return self.INITIAL_TEMPLATE_INSTRUCTIONS
            elif self.ref_tg is None and self.target_tg is not None:
                return self.WAIT_FOR_OTHER_ANNOTATOR_INSTRUCTIONS % "reference"
            elif self.ref_tg is not None and self.target_tg is not None and self.merged_tg is None:
                return self.CANT_MAKE_MERGED
            elif self.merged_annots_tg is None:
                return self.TARGET_MERGE_ANNOTS_INSTRUCTIONS
            else:
                return self.TARGET_MERGE_TIMES_INSTRUCTIONS

    @property
    def textgrids(self) -> Dict[str, Optional[BaseTextGridDocument]]:
        return {
            "template": self.template_tg,
            "ref": self.ref_tg,
            "target": self.target_tg,
            "merged": self.merged_tg,
            "merged_annots": self.merged_annots_tg,
            "merged_times": self.merged_times_tg,
            "final": self.final_tg
        }

    @property
    def allow_starter_zip_dl(self) -> bool:
        return self.current_step in (self.Steps.PARALLEL,
                                     self.Steps.PENDING,
                                     self.Steps.TIERS_AGREEMENT)

    @property
    def can_compute_gamma(self) -> bool:
        return self.current_step.value >= self.Steps.MERGING_ANNOTS.value

    def allow_file_upload(self, annotator: 'Annotator') -> bool:
        if annotator == self.reference:
            return True
        elif annotator == self.target:
            return self.current_step in (self.Steps.PENDING,
                                         self.Steps.PARALLEL,
                                         self.Steps.TIERS_AGREEMENT)

    def current_tg_template(self, user: 'Annotator') -> str:
        if self.merged_tg is None:
            if user == self.reference:
                if self.ref_tg is None:
                    return "template"
                else:
                    return "ref"
            else:  # it's the target
                if self.target_tg is None:
                    return "template"
                else:
                    return "target"
        elif self.merged_annots_tg is None:
            return "merged"
        elif self.final_tg is None:
            return "merged_times"
        else:
            return "final"

    def get_annotator_status(self, annotator: 'Annotator'):
        double_annot_data = {
            "reference": self.reference.short_profile,
            "target": self.target.short_profile,
            "current_user_role": 'reference' if annotator == self.reference else 'target'
        }
        if self.current_step == self.Steps.MERGING_TIMES:
            double_annot_data["frontiers_merge_table"] = [
                error.to_msg()
                for error in self.times_conflicts.to_merge_conflicts_errors()
            ]
        return {**super().get_annotator_status(annotator),
                "double_annot_data": double_annot_data}

    def process_ref(self, textgrid: str):
        """Handles the submission of a textgrid sent by the reference annotator"""
        if self.merged_tg is None:
            # it's a completed single-annotator textgrid
            tg = SingleAnnotatorTextGrid.from_textgrid(textgrid, [self.reference], self)
            tg.check()
            if not error_log.has_errors:
                self.ref_tg = tg
                if self.target_tg is not None:
                    error_log.flush()
                    merged_tg = MergedAnnotsTextGrid.from_ref_and_target(self.ref_tg, self.target_tg)
                    if not error_log.has_errors:
                        self.merged_tg = merged_tg
                        self.notify_merged_ready(self.target)
                        self.tiers_gamma = None
                        self.campaign.update_stats(gamma_only=True)
        elif self.merged_annots_tg is None:
            # processing the merged annots textgrid
            tg = MergedAnnotsTextGrid.from_textgrid(textgrid, self.annotators, self)
            tg.check()
            if not error_log.has_errors:
                self.merged_annots_tg = tg
                merged_times_tg, self.times_conflicts = tg.gen_merged_times()
                self.merged_times_tg = MergedTimesTextGrid.from_textgrid(
                    merged_times_tg, self.annotators, self)
        elif self.final_tg is None:
            tg = MergedTimesTextGrid.from_textgrid(textgrid, self.annotators, self)
            tg.check()
            if not error_log.has_errors:
                final_tg, _ = tg.check_times_merging()
                self.final_tg = SingleAnnotatorTextGrid.from_textgrid(
                    final_tg, self.annotators, self)
                self.is_done = True
                self.finish_time = datetime.now()
                self.notify_done()
                self.campaign.update_stats()
        else:
            # re-submitting a final textgrid
            tg = SingleAnnotatorTextGrid.from_textgrid(textgrid, self.annotators, self)
            tg.check()
            if not error_log.has_errors:
                # we don't notify since it's already done
                self.final_tg = tg
                self.finish_time = datetime.now()
                self.tiers_gamma = {}

    def process_target(self, textgrid: str):
        """Handles the submission of a textgrid sent by the target annotator"""
        if self.merged_tg is None:
            # it's a completed textgrid
            tg = SingleAnnotatorTextGrid.from_textgrid(textgrid, self.annotators, self)
            tg.check()
            if not error_log.has_errors:
                self.target_tg = tg
                if self.ref_tg is not None:
                    error_log.flush()
                    merged_tg = MergedAnnotsTextGrid.from_ref_and_target(self.ref_tg, self.target_tg)
                    if not error_log.has_errors:
                        self.merged_tg = merged_tg
                        self.notify_merged_ready(self.reference)
                        self.tiers_gamma = None
                        self.campaign.update_stats(gamma_only=True)

    def submit_textgrid(self, textgrid: str, annotator: 'Annotator'):
        if self.is_locked:
            return
        error_log.flush()
        if annotator == self.reference:
            self.process_ref(textgrid)
        elif annotator == self.target:
            self.process_target(textgrid)
        self.cascade_save()
        self._log_upload(textgrid, annotator, not error_log.has_errors)

    def validate_textgrid(self, textgrid: str, annotator: 'Annotator'):
        if self.is_locked:
            return
        error_log.flush()
        tg: BaseTextGridDocument = None
        if annotator == self.reference:
            if self.merged_tg is None:
                # it's a completed textgrid
                tg = SingleAnnotatorTextGrid.from_textgrid(textgrid, [self.reference], self)
            elif self.merged_annots_tg is None:
                # processing the merged annots textgrid
                tg = MergedAnnotsTextGrid.from_textgrid(textgrid, self.annotators, self)
            elif self.merged_times_tg is not None and self.final_tg is None:
                # the times haven't been merged yet
                tg = MergedTimesTextGrid.from_textgrid(textgrid, self.annotators, self)
            else:
                # it's the final textgrid
                tg = SingleAnnotatorTextGrid.from_textgrid(textgrid, self.annotators, self)
        elif annotator == self.target:
            # only one possible textgrid to validate
            # (was `[self.target_tg]`, which passed a textgrid where a list
            # of annotators is expected)
            tg = SingleAnnotatorTextGrid.from_textgrid(textgrid, [self.target], self)
        else:
            return
        tg.check()
        self._log_upload(textgrid, annotator, not error_log.has_errors)

    def compute_gamma(self):
        checking_scheme: TextGridCheckingScheme = self.campaign.checking_scheme
        self.tiers_gamma = {}
        for tier_name, tier_scheme in checking_scheme.tiers_specs.items():
            try:
                gamma_val = tier_scheme.compute_gamma(self.ref_tg.textgrid,
                                                      self.target_tg.textgrid)
            except Exception as err:
                print(f'Got error "{type(err)} : {str(err)}" on task for file '
                      f'{self.data_file.name}, while computing gamma for tier {tier_name}.')
                continue
            else:
                if gamma_val is not None:
                    self.tiers_gamma[tier_name] = gamma_val
        if not self.tiers_gamma:
            raise ValueError(f"Couldn't compute gamma for task {str(self.id)}")
class Bucket(Document):
    """A bucket (e.g., project) where data is stored"""
    STORAGE_ADAPTORS = (StorageObject, S3StorageObject)

    bucket_name = StringField(max_length=120, unique=True, required=True,
                              primary_key=True)
    name_map = MapField(StringField())  # {name: key} (can just search for this, if needed)
    adaptor_map = MapField(IntField())  # {key: STORAGE_ADAPTORS index}
    tag_maps = MapField(ListField(StringField()))  # {tag_name: keys[]}

    @classmethod
    def make(cls, bucketName, tagMaps=None, adaptorMap=None, nameMap=None,
             storageAdaptors=None):
        if tagMaps is None:
            tagMaps = {}
        if adaptorMap is None:
            adaptorMap = {}
        if nameMap is None:
            nameMap = {}
        return cls(**{
            'bucket_name': bucketName,
            'tag_maps': tagMaps,
            'name_map': nameMap,
            'adaptor_map': adaptorMap
        })

    # Bucket Naming
    def getBucketName(self):
        return self.bucket_name

    def setBucketName(self, name):
        self.reload()
        self.bucket_name = name
        self.save()
        self.reload()

    # Resolving Reference IDs and Names
    def hasKey(self, key):
        return isinstance(key, basestring) and key in self.adaptor_map

    def hasName(self, name):
        return isinstance(name, basestring) and name in self.name_map

    def hasRef(self, key=None, name=None):
        key = self.resolveRef(key, name)
        return self.hasKey(key)

    def resolveRef(self, key=None, name=None):
        ref = None
        if name == '':
            name = None
        if (key is not None or name is not None):
            if ((name is None and self.hasKey(key)) or
                    ((name is not None) and (self.getName(key) == name))):
                ref = key
            elif (key is None and self.hasName(name)):
                ref = self.name_map[name]
        return ref

    def getMatchingKeys(self, tags=None, storageType=None):
        if tags is not None:
            keys = set()
            for tag in tags:
                if isinstance(tag, basestring) and tag in self.tag_maps:
                    keys.update([
                        k for k in self.tag_maps[tag]
                        if ((storageType is None) or
                            (storageType == self.adaptor_map.get(k, None)))
                    ])
            keys = list(keys)
        elif (storageType is not None):
            keys = [
                k for k in self.adaptor_map.keys()
                if (storageType == self.adaptor_map.get(k, None))
            ]
        else:
            keys = self.adaptor_map.keys()
        return keys

    # Managing Tags
    def hasTag(self, tag):
        return isinstance(tag, basestring) and tag in self.tag_maps

    def hasTagKey(self, tag, key=None, name=None):
        key = self.resolveRef(key, name)
        if self.hasTag(tag) and self.hasKey(key):
            return key in self.tag_maps[tag]
        return False

    def getObjectTags(self, key=None, name=None):
        key = self.resolveRef(key, name)
        if self.hasKey(key):
            tags = [tag for tag in self.tag_maps if key in self.tag_maps[tag]]
            return tags
        else:
            return []

    def addTagKey(self, tag, key=None, name=None, delaySave=False):
        if not delaySave:
            self.reload()
        key = self.resolveRef(key, name)
        try:
            tag = str(tag)
        except Exception:
            logWarning("ERROR: Invalid tag couldn't convert from unicode")
            tag = None
        if key is not None and isinstance(tag, str) and not self.hasTagKey(tag, key):
            if self.hasTag(tag):
                self.tag_maps[tag].append(key)
            else:
                self.tag_maps[tag] = [key]
        if not delaySave:
            self.save()
            self.reload()

    def delTagKey(self, tag, key=None, name=None, delaySave=False):
        if not delaySave:
            self.reload()
        key = self.resolveRef(key, name)
        if self.hasTagKey(tag, key):
            self.tag_maps[tag].remove(key)
        if not delaySave:
            self.save()
            self.reload()

    def changeTags(self, tags, key=None, name=None, delaySave=False):
        if not delaySave:
            self.reload()
        key = self.resolveRef(key, name)
        oldTags = self.getObjectTags(key)
        obsoleteTags = set(oldTags)
        # Add new tags
        for tag in tags:
            self.addTagKey(tag, key, delaySave=delaySave)
            if tag in obsoleteTags:
                obsoleteTags.remove(tag)
        # Remove ones not in the new set
        for tag in obsoleteTags:
            self.delTagKey(tag, key, delaySave=delaySave)
        if not delaySave:
            self.save()
            self.reload()

    def getStorageAdaptor(self, key=None, name=None):
        if name is not None:
            key = self.resolveRef(key, name)
        if self.hasKey(key):
            adaptorId = self.adaptor_map[key]
            return self.STORAGE_ADAPTORS[adaptorId]
        return None

    def _getData(self, key):
        if key is None:
            return None
        storage = self.getStorageAdaptor(key)
        if storage is not None:
            data = storage.objects(key=key).first()
            if data is None:
                logWarning("No data for: ", key)
            return data
        logWarning("No data for: ", key)
        return None

    def getValue(self, key=None, name=None):
        key = self.resolveRef(key, name)
        data = self._getData(key)
        if data is not None:
            return data.value
        else:
            return None

    def getLink(self, key=None, name=None):
        key = self.resolveRef(key, name)
        data = self._getData(key)
        if data is not None and isinstance(data, S3StorageObject):
            # Replace this with the S3 link in the future
            return "prod.x-in-y.com"
        else:
            return None

    def getName(self, key):
        data = self._getData(key)
        if data is not None:
            if data.name != '':
                return data.name
            else:
                return None
        else:
            return None

    def changeName(self, newName, key=None, name=None):
        self.reload()
        key = self.resolveRef(key, name)
        data = self._getData(key)
        isChanged, data = self._changeName(newName, data)
        if isChanged:
            data.save()
            self.save()
            self.reload()
            return True
        else:
            return False

    def _changeName(self, newName, data):
        if newName is None:
            newName = ''
        if ((data is not None) and
                (newName == '' or not self.hasName(newName))):
            key = data.key
            name = self.getName(key)
            # Update any internal naming data
            if data.data_type == SERIALIZABLE_DATA_TYPE:
                value = data.value
                value = nativizeObject(value, None, JSON_FORMAT)
                if isinstance(value, NamedSerializable):
                    if newName == '':
                        value.setName(None)
                    else:
                        value.setName(newName)
                    value = serializeObject(value, JSON_FORMAT)
                    data.value = value
            # Update the storage service data
            data.name = newName
            if (name is not None):
                del self.name_map[name]
            if newName != '':
                self.name_map[newName] = key
            isChanged = True
        else:
            isChanged = False
        return isChanged, data

    def getDescription(self, key=None, name=None):
        key = self.resolveRef(key, name)
        data = self._getData(key)
        if data is not None:
            return data.description
        else:
            return None

    def getDataType(self, key=None, name=None):
        key = self.resolveRef(key, name)
        data = self._getData(key)
        if data is not None:
            return data.data_type
        else:
            return None

    def setValue(self, key=None, value=None, name=None, description=None,
                 tags=None, storageType=None, dataType=None,
                 allowOverwrite=False, allowCreate=True):
        self.reload()
        logWarning("SETTING VALUE")
        hasKey = self.hasKey(key)
        hasName = self.hasName(name)
        ref = self.resolveRef(key, name)
        # Make sure the reference is valid, if any was given
        if (ref is None) and ((hasKey and hasName) or
                              (hasName and key is not None)):
            logWarning("INVALID: Mismatched unique keys in set value: "
                       "(key=%s, name=%s)" % (key, name))
            return False
        # Overwrite existing data.
        # This is aborted if another entry already uses the new 'name'
        elif (ref is not None) and allowOverwrite:
            return self._updateValue(key, value, name, description, tags,
                                     storageType, dataType)
        # Create a new entry.
        # The key must not already exist and a non-None value must be given
        elif (ref is None) and allowCreate:
            return self._createValue(key, value, name, description, tags,
                                     storageType, dataType)
        else:
            logWarning('INVALID CONDITION')
            return False

    def _updateValue(self, key, value=None, name=None, description=None,
                     tags=None, storageType=None, dataType=None):
        key = self.resolveRef(key, name)
        currentName = self.getName(key)
        data = self._getData(key)
        if key is not None and data is not None:
            if name is not None and currentName != name:
                isChanged, data = self._changeName(name, data)
                if not isChanged:
                    # Failed on the rename attempt
                    logWarning("Failed to update, rename failed: ", name)
                    return False
            if value is not None:
                data.value = value
            if dataType is not None:
                data.data_type = dataType
            if description is not None:
                data.description = description
            if storageType is not None:
                # @TODO: Fix this so it works appropriately
                # (e.g., changes the stored object type)
                # For now, no-op
                # self.adaptor_map[key] = storageType
                pass
            if tags is not None:
                self.changeTags(tags, key, delaySave=True)
            data.save()
            self.save()
            self.reload()
            return True
        else:
            logWarning("Error in updateValue. Couldn't get rename for: " + str(key))
            return False

    def _createValue(self, key, value, name='', description='', tags=None,
                     storageType=DATA_TYPE_DB, dataType=''):
        # Force valid default values
        if name is None:
            name = ''
        if description is None:
            description = ''
        if tags is None:
            tags = []
        if dataType is None:
            dataType = ''
        if storageType is None:
            storageType = DATA_TYPE_DB
        # Must be a valid storage type
        if (key is not None and isinstance(key, basestring) and
                isinstance(storageType, int) and
                storageType in VALID_STORAGE_TYPES):
            storageData = {
                'key': key,
                'value': value,
                'name': name,
                'data_type': dataType,
                'description': description
            }
            storageClass = self.STORAGE_ADAPTORS[storageType]
            data = storageClass(**storageData)
            if name != '':
                self.name_map[name] = key
            self.adaptor_map[key] = storageType
            for tag in tags:
                self.addTagKey(tag, key, delaySave=True)
            data.save()
            self.save()
            self.reload()
            return True
        else:
            logWarning("Couldn't create: " + str(key))
            return False

    def delValue(self, key=None, name=None):
        self.reload()
        key = self.resolveRef(key, name)
        if key is not None and self.hasKey(key):
            name = self.getName(key)
            data = self._getData(key)
            if name is not None:
                del self.name_map[name]
            del self.adaptor_map[key]
            self.changeTags([], key, delaySave=True)
            data.delete()
            self.save()
            self.reload()
            return True
        else:
            return False

    def exportData(self):
        keys = self.adaptor_map.keys()
        outData = []
        for key in keys:
            value = self.getValue(key)
            name = self.getName(key)
            tags = self.getObjectTags(key)
            description = self.getDescription(key)
            storageType = self.adaptor_map[key]
            dataType = self.getDataType(key)
            outData.append([key, value, name, tags, storageType, dataType])
        return outData

    def resolveInconsistencies(self, deleteBad=False):
        self.name_map.clear()
        self.save()
        badKeys = []  # was referenced below without ever being initialized
        for key in self.adaptor_map.keys():
            data = self._getData(key)
            if data is None:
                badKeys.append(key)
            else:
                name = data.name
                if name == '':
                    name = None
                self.name_map[name] = key
                self._changeName(name, data)
        self.save()
        return StorageObject.objects, self.adaptor_map, self.name_map
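# Usage sketch (DATA_TYPE_DB and the storage adaptor classes come from the
# surrounding module and are assumed to line up with STORAGE_ADAPTORS
# indices; key/name strings are illustrative):
bucket = Bucket.make('myProject')
bucket.save()
bucket.setValue(key='entry-001', value='some payload', name='My Entry',
                tags=['draft'], storageType=DATA_TYPE_DB)
bucket.getValue(name='My Entry')        # -> 'some payload'
bucket.getMatchingKeys(tags=['draft'])  # -> ['entry-001']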
class AnoLocalizacao(EmbeddedDocument):
    value = StringField()
    meses = MapField(EmbeddedDocumentField(MesLocalizacao))

    def __str__(self):
        return 'Ano {}'.format(self.value)
class Venue(Document):
    venue_name = StringField()
    input_json = DictField()
    base_layout = MapField(EmbeddedDocumentField(Section))
    events = ListField(EmbeddedDocumentField(Event))
    meta = {'collection': 'venue'}

    def create_venue(self, sections: dict) -> None:
        """Builds the venue seating plan from the JSON defining the seating sections

        A section has the format of
        "sections": [
            {
                "section_type": "house",
                "rows": [
                    {
                        "row_rank": "1st Rank",
                        "num_seats": 8,
                        "num_rows": 3,
                        "order": "non-sequential"
                    }
                    ...
                ]
            }
        """
        reset_generator()
        for section in sections:
            self.base_layout[section['section_type']] = Section.create_section(section)
        self.save(load_bulk=False)

    def create_event(self, date: datetime, event_name: str = 'Test Event') -> Event:
        """Creates an event in the venue"""
        self.events.append(
            Event(event_name=event_name,
                  created_at=datetime.now(),
                  date=date,
                  sections=deepcopy(self.base_layout)))
        self.save()
        return self.events[-1]

    def get_event(self, event_id: str) -> Event:
        """Get an event occurring in the venue"""
        for event in self.events:
            if str(event.id) == event_id:
                return event
        raise NotFoundException

    def make_reservation(self, event_id: str, *args, **kwargs) -> list:
        """Interface to make a reservation for a group of people for a given
        event and section

        A zeroed list is returned if every person found a seat; otherwise
        each entry holds the number of people left without a seat.
        """
        event = self.get_event(event_id)
        result = event.make_reservation(*args, **kwargs)
        self.save()
        return result

    def block(self, event_id: str, *args, **kwargs) -> bool:
        """Interface to mark a seat as blocked"""
        event = self.get_event(event_id)
        result = event.block(*args, **kwargs)
        if result:
            self.save()
        return result

    def to_dict(self) -> dict:
        """A Venue's dict representation"""
        return {
            'id': str(self.id),
            'venue_name': self.venue_name,
            'input_json': self.input_json,
            'base_layout': {
                section_type: section.to_dict()
                for section_type, section in self.base_layout.items()
            },
            'events': [event.to_dict() for event in self.events]
        }
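# Usage sketch, following the JSON shape documented in create_venue; the
# make_reservation arguments assume Event.make_reservation takes a section
# type plus the per-rank group list that Section.make_reservation expects:
venue = Venue(venue_name='Town Hall')
venue.create_venue([{
    'section_type': 'house',
    'rows': [{'row_rank': '1st Rank', 'num_seats': 8, 'num_rows': 3,
              'order': 'non-sequential'}],
}])
event = venue.create_event(date=datetime(2024, 6, 1), event_name='Premiere')
# Reserve 2 seats in the 1st rank of the 'house' section; a zeroed list
# means everyone was seated.
venue.make_reservation(str(event.id), 'house', [2])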
class Section(EmbeddedDocument):
    type = StringField(required=True)
    rows = MapField(ListField(EmbeddedDocumentField(Row)))

    def add_row(self, row: Row) -> None:
        """Add a row to the section"""
        if row.rank not in self.rows:
            self.rows[row.rank] = []
        self.rows[row.rank].append(row)

    def get_rows_with_seats(self) -> list:
        """Get all the rows with free seats"""
        # `self.rows` maps rank -> list of rows, so iterate the lists,
        # not the map's keys
        return [row for rows in self.rows.values() for row in rows
                if row.free_seats > 0]

    def make_reservation(self, group: list) -> list:
        """Makes the reservation for groups of people

        The number of people to be seated is the elements in the array and
        the array index represents the row rank.
        [2, 4] means 2 people to be seated in 1st rank,
        4 people to be seated in 2nd rank

        The availability structure is built as follows
        {
            "1st Rank": {
                "1": {
                    "1": [Seat 3],
                    "2": [Seat 1, Seat 2, Seat 4, Seat 5]
                }
            }
        }
        meaning
        Row 1 in 1st Rank has 1 isolated seat (Seat 3)
        Row 1 in 1st Rank has 2 2-contiguous seats (Seat 1, Seat 2 and
        Seat 4, Seat 5)

        A huge improvement would be keeping this structure cached so we do
        not need to generate it for every group of reservations
        """
        if len(group) > len(self.rows.keys()):
            return group
        availability = {}
        for rank, rows in self.rows.items():
            availability[rank] = defaultdict(dict)
            for row in rows:
                availability[rank][row.row_id] = row.number_contiguous_seats()
        to_seat = group[:]
        for rank_index, num_people in enumerate(group):
            for i, rank in enumerate(self.rows):
                if i == rank_index:
                    rank_availability = availability[rank]
                    for available_seats in rank_availability.values():
                        if len(available_seats.get(num_people, [])):
                            # We found exactly num_people contiguous seats
                            for i in range(num_people):
                                available_seats[num_people][i].reserve()
                                to_seat[rank_index] -= 1
                            break
                        else:
                            # let's try rows with more available contiguous
                            # seats OR let's split the group
                            for num_available in available_seats.keys():
                                if num_available <= num_people:
                                    continue
                                for i in range(num_people):
                                    available_seats[num_available][i].reserve()
                                    to_seat[rank_index] -= 1
                                if to_seat[rank_index] == 0:
                                    break
                            if to_seat[rank_index] > 0:
                                for num_available in reversed(
                                        list(available_seats.keys())):
                                    if num_available >= num_people:
                                        continue
                                    for i in range(
                                            min(num_available, to_seat[rank_index])):
                                        available_seats[num_available][i].reserve()
                                        to_seat[rank_index] -= 1
                                    if to_seat[rank_index] == 0:
                                        break
                            if to_seat[rank_index] == 0:
                                break
                    if to_seat[rank_index] == 0:
                        break
        return to_seat

    def block(self, row_id: int, *args, **kwargs) -> bool:
        """Interface to mark a seat in a row as blocked"""
        for _, rows in self.rows.items():
            for row in rows:
                if row.row_id == str(row_id):
                    return row.block(*args, **kwargs)
        return False

    @classmethod
    def create_section(cls, section_json: dict) -> 'Section':
        """Parse the section part of the JSON in form of
        "sections": [
            {
                "section_type": "house",
                "rows": [...]
            }
        ]
        A section is a group of rows
        """
        section = cls(type=section_json['section_type'])
        for row_type in section_json['rows']:
            for _ in range(row_type['num_rows']):
                section.add_row(Row.create_row(row_type, next(id_generator)))
        return section

    def to_dict(self) -> dict:
        """A Section's dict representation"""
        return {
            'type': self.type,
            'rows': {
                rank: [row.to_dict() for row in rows]
                for rank, rows in self.rows.items()
            }
        }
class CampaignStats(EmbeddedDocument):
    """Stores the campaign's basic statistics"""
    # TODO: add a "refresh campaign stats" handler
    total_files = IntField(required=True)
    assigned_files = IntField(required=True)
    total_tasks = IntField(required=True)
    completed_tasks = IntField(required=True)
    single_annotator_tasks = IntField(required=True)
    double_annotator_tasks = IntField(required=True)
    tiers_gamma: Dict[str, float] = MapField(FloatField())
    can_update_gamma = BooleanField()
    can_compute_gamma = BooleanField()
    gamma_updating = BooleanField(default=False)
    annotators = ListField(ReferenceField('Annotator'))

    def update_stats(self, campaign: 'Campaign'):
        """Update all statistics for that campaign"""
        self.total_tasks = len(campaign.tasks)
        self.completed_tasks = len(
            [task for task in campaign.tasks if task.is_done])
        self.total_files = campaign.corpus.files_count
        self.assigned_files = len(
            set(task.data_file for task in campaign.tasks))
        self.single_annotator_tasks = len([
            task for task in campaign.tasks
            if isinstance(task, SingleAnnotatorTask)
        ])
        self.double_annotator_tasks = len([
            task for task in campaign.tasks
            if isinstance(task, DoubleAnnotatorTask)
        ])
        all_annotators = set()
        for task in campaign.tasks:
            for annotator in task.annotators:
                all_annotators.add(annotator)
        self.annotators = list(all_annotators)
        self.update_gamma_stats(campaign)

    def update_gamma_stats(self, campaign: 'Campaign'):
        """Aggregates the gamma statistics for the campaign.
        Does **NOT** actually compute the gamma values"""
        if campaign.checking_scheme is None:
            # no gamma possible if a checking scheme hasn't been specified
            self.can_update_gamma = False
            self.can_compute_gamma = False
            self.gamma_updating = False
        else:
            self.can_compute_gamma = True
            tiers_gamma: Dict[str, List[float]] = defaultdict(list)
            # this flag is set if one of the tasks is ripe for gamma updating
            self.can_update_gamma = False
            for task in campaign.tasks:
                if not isinstance(task, DoubleAnnotatorTask):
                    continue
                if not task.can_compute_gamma:
                    continue
                if not task.tiers_gamma:
                    self.can_update_gamma = True
                else:
                    for tier_name, gamma_value in task.tiers_gamma.items():
                        tiers_gamma[tier_name].append(gamma_value)
            # TODO: computing the mean gamma for each tier; can be changed?
            for tier_name, gamma_values in tiers_gamma.items():
                self.tiers_gamma[tier_name] = mean(gamma_values)

    def to_msg(self):
        return {
            "total_files": self.total_files,
            "assigned_files": self.assigned_files,
            "total_tasks": self.total_tasks,
            "completed_tasks": self.completed_tasks,
            "can_update_gamma": self.can_update_gamma,
            "can_compute_gamma": self.can_compute_gamma,
            "gamma_updating": self.gamma_updating,
            "tiers_gamma": self.tiers_gamma
        }
class Track(Document):
    meta = {'collection': 'tracks'}

    def __init__(self, *args, **kwargs):
        super(Track, self).__init__(**kwargs)
        try:
            self.album = kwargs['album']
            self.artist = kwargs['artists'][0]['name']
            self.href = kwargs['href']
            self.name = kwargs['name']
            self.playlist_id = kwargs['playlist_id']
            self.id = kwargs['id'] if 'track_id' in kwargs else ObjectId()
            self.track_id = kwargs['track_id'] if 'track_id' in kwargs else kwargs['id']
            self.voter_list = kwargs['voter_list'] if 'voter_list' in kwargs else []
            self.vote_count = kwargs['vote_count'] if 'vote_count' in kwargs else 0
        except Exception as e:
            print("Track object creation failed: ", e)

    id = ObjectIdField(primary_key=True)
    album = StringField()
    artist = StringField()
    href = StringField()
    name = StringField()
    playlist_id = StringField(required=True)
    track_attributes = MapField(field=FloatField())
    track_id = StringField(required=True)
    voter_list = ListField(StringField())
    vote_count = IntField()
    artists = ListField()
    available_markets = ListField(StringField())
    disc_number = IntField()
    duration_ms = IntField()
    episode = BooleanField()
    explicit = BooleanField()
    external_fields = DictField()
    external_ids = DictField()
    external_urls = DictField()
    is_local = BooleanField()
    popularity = FloatField()
    preview_url = StringField()
    track = BooleanField()
    track_number = IntField()
    type = StringField()
    uri = StringField()
    image = DictField()

    def to_log(self):
        return {
            'artist': self.artist,
            'danceability': self.track_attributes['danceability'],
            'liveness': self.track_attributes['liveness'],
            'name': self.name,
            'playlist_id': self.playlist_id,
            'tempo': self.track_attributes['tempo'],
            'track_id': str(self.track_id),
            'voter_list': self.voter_list,
            'vote_count': self.vote_count,
            'id': self.id
        }

    def pre_save(self):
        if self.playlist_id is None:
            raise TrackException('Every track needs to have a playlist_id')
        if not self.image:
            self.set_image()
        self._add_audio_analysis()

    def save(self, **kwargs):
        self.pre_save()
        super(Track, self).save()

    def new_save(self, **kwargs):
        if not self.track_exists():
            self.save()

    def _add_audio_analysis(self):
        me_headers = {
            'Authorization': 'Bearer {}'.format(tokens.get_access_token())
        }
        response = requests.get(
            settings.API_URL_BASE.format(
                endpoint='audio-features/{id}'.format(id=self.track_id)),
            headers=me_headers)
        audio_analysis_info = json.loads(response.text)
        if 'danceability' in audio_analysis_info.keys():
            self.track_attributes['danceability'] = audio_analysis_info['danceability']
        if 'liveness' in audio_analysis_info.keys():
            self.track_attributes['liveness'] = audio_analysis_info['liveness']
        if 'tempo' in audio_analysis_info.keys():
            self.track_attributes['tempo'] = audio_analysis_info['tempo']

    def track_exists(self):
        tracks = Track.objects(track_id=self.track_id,
                               playlist_id=self.playlist_id)
        if tracks:
            return True
        return False

    def set_image(self):
        track_url = 'tracks/{id}'.format(id=self.track_id)
        track_data = spotify.get(
            settings.API_URL_BASE.format(endpoint=track_url))
        self.image = track_data['album']['images'][-1:][0]
class WorkflowTool(Document):
    """Abstract class that defines a workflow tool

    Stores name and description, any notes about the tool, and its
    dependencies. Provides a uniform interface to access these capabilities
    """
    meta = {'allow_inheritance': True}

    name = StringField(required=True, regex="^[^\\s+]*$",
                       help_text='Short identifier of this tool')
    """Name of this tool. Cannot have whitespace"""

    description = StringField(
        required=True,
        help_text='Longer description of what this tool does')
    """Description of this tool"""

    last_run = DateTimeField(help_text='When this tool was run last')
    """Last time this object was run"""

    notes = ListField(EmbeddedDocumentField(Note),
                      help_text='Any notes about this method')
    """Any notes about this tool"""

    previous_step = ReferenceField(
        'WorkflowTool',
        help_text='Previous tool in chain. If none, this tool pulls from the data extractor')
    """Previous step / dependency for this tool.

    LW 11July2016: Consider making this a ListField to have multiple dependencies"""

    toolchain = ReferenceField('ToolChain', required=True,
                               help_text='Toolchain this tool is part of')
    """Tool chain that this tool is associated with"""

    _result_cache = None
    """Holds a dictionary of results from this calculation"""

    result = MapField(
        EmbeddedDocumentField(Artifact),
        help_text='Results produced by this tool, or inherited by any previous tools')
    """Holds the pickled version of `_result_cache`"""

    def __init__(self, *args, **kwargs):
        super(WorkflowTool, self).__init__(*args, **kwargs)
        # Register this class with the KnownClass library
        if not ('skip_register' in kwargs and kwargs['skip_register']):
            KnownClass.register_class(self)

    def clone(self, name=None, description=None):
        """Create a new copy of this tool

        :param name: string, new name for new copy
        :param description: string, description for new copy
        :return: WorkflowTool, new copy of this object"""
        # Make a copy
        output = deepcopy(self)
        output.id = None
        output.previous_step = None
        # Set name and description, if desired
        if name:
            output.name = name
        if description:
            output.description = description
        # Clear the notes, and anything related to results
        # (was `output.result_cache`, which silently created a new attribute)
        output._result_cache = None
        output.result = None
        output.last_run = None
        output.notes = []
        return output

    def get_settings(self):
        """Get the settings that could be printed or adjusted via a web form

        :return: dict of settings to be printed"""
        output = dict(self._data)
        for nogo in ['id', 'name', 'description', 'notes', 'last_run',
                     'toolchain', 'result', 'previous_step']:
            del output[nogo]
        return output

    def get_form(self):
        """Get a WTForm class that can be used to edit this class

        :return: WTForm, used for editing"""
        # Prepare the previous step choices
        previous_steps = [('extractor', self.toolchain.extractor.name)]
        for choice in self.get_acceptable_previous_steps():
            previous_steps.append((str(choice.id), choice.name))

        class EditForm(Form):
            name = wtfields.StringField(
                'Tool Name',
                default=self.name,
                description='Simple name for this tool')
            description = wtfields.TextAreaField(
                'Tool Description',
                default=self.description,
                render_kw={'type': 'textarea'},
                description='Longer form description of what this tool does')
            previous_step = wtfields.SelectField(
                'Previous Step',
                choices=previous_steps,
                default=str(self.previous_step.id) if self.previous_step else 'extractor',
                description='Previous step in toolchain. Supplies inputs into this tool')

        return EditForm

    def process_form(self, form, request):
        """Given a form, change the settings

        :param form: wtforms.form.Form, form with new data for the class
        :param request: Request, request accompanying the form submission"""
        # Make the changes
        self.name = form.name.data
        self.description = form.description.data
        # Get the previous step
        prev_step_choice = form.previous_step.data
        if prev_step_choice == 'extractor':
            self.previous_step = None
        else:
            self.previous_step = WorkflowTool.objects.get(id=prev_step_choice)
        # Clear the results
        self.clear_results()

    def get_file_information(self):
        """Get the list of information files that are used as components of
        this class. Each file gets a dictionary, with keys

        description -> string, short description of what this file is
        extension -> string, desired extension for the file

        :return: dict, where key is the attribute name and value is a
            description of the file"""
        return {}

    def get_inputs(self, save_results=False):
        """Get the results from the previous step, which are used as input
        into this transformer

        :param save_results: boolean, whether to ensure the previous tool
            saves new results
        :return: dict, results from the previous step. Dictionary at least
            contains an entry 'data' that matches the dataset from the
            previous step"""
        if self.previous_step is None:
            # Get data from the host workflow
            data = self.toolchain.extractor.get_data(save_results=save_results)
            # Store data as a pandas artifact
            return dict(data=data)
        # Get result from previous step
        return self.previous_step.run(save_results=save_results)

    def get_next_steps(self):
        """Get the tools that have this tool as their previous step

        Output:
            :return: list of WorkflowTool objects"""
        return WorkflowTool.objects.filter(previous_step=self)

    def get_all_next_steps(self):
        """Get any tools later in the toolchain

        Output:
            :return: set, all tools that are after this one"""
        output = set(self.get_next_steps())
        for step in set(output):
            output.update(step.get_all_next_steps())
        return output

    def get_acceptable_previous_steps(self):
        """Get all steps that are not after this one in the chain

        Output:
            :return: set, all steps in the toolchain that are not after this"""
        # Get the bad choices
        bad_tools = self.get_all_next_steps()
        return WorkflowTool.objects.filter(
            toolchain=self.toolchain,
            id__not__in=[x.id for x in bad_tools],
            id__ne=self.id)

    def get_all_previous_steps(self):
        """Get any steps before this one

        Output:
            :return: list, all steps that are previous, where [0] is the
                immediately previous step"""
        # Nothing before this step: return an empty list (the method was
        # named `get_all_previous_step` but called itself with the plural
        # name, and returned a set here despite building a list below)
        if not self.previous_step:
            return []
        # Propagate!
        output = [self.previous_step]
        output.extend(self.previous_step.get_all_previous_steps())
        return output

    def run(self, ignore_results=False, save_results=False, run_subsequent=False):
        """Run an analysis tool

        If the tool has already been run, returns the cached result

        Input:
            :param ignore_results: boolean, whether to redo the calculation
            :param save_results: boolean, whether to save results
            :param run_subsequent: boolean, whether to run subsequent tools
        Output:
            :return: dict, result from the tool as Artifact objects"""
        # Clear output if needed
        if ignore_results:
            self.clear_results()
        # Run it or unpickle the cached result
        if self.last_run is None:
            # Inform the logger
            logging.info("Running %s" % self.name)
            # Get the inputs
            inputs = self.get_inputs(save_results=save_results)
            if 'data' not in inputs:
                raise Exception('Input does not include data field')
            # Remove data from its holder (to be easier to work with)
            data_artifact = inputs['data']
            data = data_artifact.get_object()
            del inputs['data']
            # Run the transformer
            data, outputs = self._run(data, inputs)
            # Put data back into the results object as a non-artifact
            outputs['data'] = data_artifact
            outputs['data'].set_object(data)
            self.result = outputs
            # Update the last run time
            self.last_run = datetime.now()
            # If desired, save results
            if save_results:
                self.save()
            # Now, clear or re-run subsequent calculations (which are now
            # out of date)
            for tool in self.get_next_steps():
                if run_subsequent:
                    # Force it to rerun itself
                    tool.run(ignore_results=True, save_results=True,
                             run_subsequent=True)
                else:
                    tool.clear_results(clear_next_steps=True, save=True)
        return self.result

    def _run(self, data, other_inputs):
        """Do the actual running

        :param data: DataFrame, data to be transformed
        :param other_inputs: dict, other inputs to the transformer
        :return: DataFrame holding results after processing,
            dictionary holding other results from this tool"""
        raise NotImplementedError()

    def get_data(self):
        """Get the data after this step

        :return: DataFrame"""
        return self.run()['data'].get_object()

    def clear_results(self, clear_next_steps=False, save=False):
        """Clear any cached results

        :param clear_next_steps: bool, whether to clear results of
            subsequent steps as well
        :param save: bool, whether to save the results"""
        # Inform the logger
        logging.info("Clearing %s" % self.name)
        # Clear results in this class
        self.last_run = None
        self.result = {}
        # If desired, save
        if save:
            self.save()
        # If desired, clear any subsequent steps recursively
        if clear_next_steps:
            for tool in self.get_next_steps():
                tool.clear_results(clear_next_steps=True, save=save)

    def delete(self, update_dependencies=True, **write_concern):
        """Delete this object.

        :param update_dependencies: boolean, whether to take the subsequent
            tools in a chain and attach them to the previous tool"""
        if update_dependencies:
            for tool in self.get_next_steps():
                tool.previous_step = self.previous_step
                tool.save()
        logging.info("Deleting %s" % self.name)
        super(WorkflowTool, self).delete(**write_concern)
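# Minimal sketch of a concrete tool (hypothetical subclass): implementations
# only override _run(), which receives the previous step's DataFrame and
# returns the transformed frame plus a dict of extra outputs.
class DropMissingRows(WorkflowTool):
    def _run(self, data, other_inputs):
        # Drop incomplete rows; no extra artifacts besides the data itself
        return data.dropna(), {}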
class ProjectModel(EmbeddedDocument):
    datasets = MapField(EmbeddedDocumentField(DatasetModel))
class DiaLocalizacao(EmbeddedDocument):
    value = StringField()
    horas = MapField(EmbeddedDocumentField(HoraLocalizacao))

    def __str__(self):
        return 'Dia {}'.format(self.value)
class ParsedProduct(Product):
    attr = DictField()
    parsedAttr = ListField(EmbeddedDocumentField(Attr))
    keywords = MapField(IntField())
    meta = {'allow_inheritance': True}
class MesLocalizacao(EmbeddedDocument):
    value = StringField()
    dias = MapField(EmbeddedDocumentField(DiaLocalizacao))

    def __str__(self):
        return 'Mes {}'.format(self.value)
class TextGridCheckingScheme(Document):
    name = StringField(required=True)
    # mapping: tier_name -> specs
    tiers_specs: Dict[str, TierScheme] = MapField(EmbeddedDocumentField(TierScheme))
    # empty tiers can be dropped at merging; currently not implemented in the client
    drop_empty_tiers = BooleanField(default=False)
    # for now this isn't set. In the future it'll be a pluginizable class that
    # can handle checking outside of the defined generic framework
    tg_checker_name = StringField()

    @classmethod
    def from_tierspecs_schema(cls, scheme_data: List, scheme_name: str):
        new_scheme = cls(name=scheme_name)
        for tier_specs in scheme_data:
            if tier_specs.get("checking_type") == "CATEGORICAL":
                new_tier_scheme = CategoricalTier(
                    name=tier_specs["name"],
                    required=tier_specs["required"],
                    allow_empty=tier_specs["allow_empty"],
                    categories=tier_specs["categories"])
            elif tier_specs.get("checking_type") == "PARSED":
                new_tier_scheme = ParsedTier(
                    name=tier_specs["name"],
                    required=tier_specs["required"],
                    allow_empty=tier_specs["allow_empty"],
                    parser_name=tier_specs["parser"]["name"],
                    parser_module=tier_specs["parser"]["module"])
            else:
                new_tier_scheme = UnCheckedTier(
                    name=tier_specs["name"],
                    allow_empty=tier_specs["allow_empty"],
                    required=tier_specs["required"])
            new_scheme.tiers_specs[tier_specs["name"]] = new_tier_scheme
        return new_scheme

    @property
    def required_tiers_names(self):
        return [name for name, specs in self.tiers_specs.items()
                if specs.required]

    @property
    def all_tiers_names(self):
        return list(self.tiers_specs.keys())

    def get_tier_scheme(self, tier_name: str) -> TierScheme:
        return self.tiers_specs[tier_name]

    def gen_template_tg(self, duration: float, filename: str):
        new_tg = TextGrid(name=filename, minTime=0.0, maxTime=duration)
        for tier_name in self.tiers_specs.keys():
            new_tier = IntervalTier(name=tier_name, minTime=0.0, maxTime=duration)
            new_tg.append(new_tier)
        return new_tg

    @property
    def summary(self):
        return {
            "id": str(self.id),
            "name": self.name,
            "tier_specs": [tier.to_specs() for tier in self.tiers_specs.values()]
        }
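# Usage sketch: building a scheme from a tier-specs list (tier names and
# categories are illustrative, matching the keys read above; an entry
# without "checking_type" falls through to UnCheckedTier):
scheme = TextGridCheckingScheme.from_tierspecs_schema(
    scheme_data=[
        {"name": "phones", "checking_type": "CATEGORICAL",
         "required": True, "allow_empty": False,
         "categories": ["a", "e", "i", "o", "u"]},
        {"name": "comments", "required": False, "allow_empty": True},
    ],
    scheme_name="demo scheme")
scheme.all_tiers_names       # ['phones', 'comments']
scheme.required_tiers_names  # ['phones']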
class UsuarioLocalizacao(Document):
    id_usuario = IntField(unique=True)
    anos = MapField(EmbeddedDocumentField(AnoLocalizacao))
    ultima_localizacao = EmbeddedDocumentField(UltimaLocalizacao)

    def __str__(self):
        return "Localizações de {}".format(self.id_usuario)

    def _verify_ano(self, ano):
        try:
            assert self.anos[ano]
            return True
        except KeyError:
            return False

    def _verify_mes(self, ano, mes):
        try:
            assert self.anos[ano].meses[mes]
            return True
        except KeyError:
            return False

    def _verify_dia(self, ano, mes, dia):
        try:
            assert self.anos[ano].meses[mes].dias[dia]
            return True
        except KeyError:
            return False

    def _verify_hora(self, ano, mes, dia, hora):
        try:
            assert self.anos[ano].meses[mes].dias[dia].horas[hora]
            return True
        except KeyError:
            return False

    def _verify_minutos(self, ano, mes, dia, hora, minutos):
        try:
            assert self.anos[ano].meses[mes].dias[dia].horas[hora].localizacoes[minutos]
            return True
        except KeyError:
            return False

    def _create_localizacao(self, minutos, lat, long):
        return Localizacao(minutos=minutos, lat=lat, long=long)

    def _create_hora(self, hora, minutos, lat, long):
        h = HoraLocalizacao(value=hora)
        h.localizacoes[minutos] = self._create_localizacao(minutos, lat, long)
        return h

    def _create_dia(self, dia, hora, minutos, lat, long):
        d = DiaLocalizacao(value=dia)
        h = self._create_hora(hora, minutos, lat, long)
        d.horas[h.value] = h
        return d

    def _create_mes(self, mes, dia, hora, minutos, lat, long):
        m = MesLocalizacao(value=mes)
        d = self._create_dia(dia, hora, minutos, lat, long)
        m.dias[d.value] = d
        return m

    def _create_ano(self, ano, mes, dia, hora, minutos, lat, long):
        a = AnoLocalizacao(value=ano)
        m = self._create_mes(mes, dia, hora, minutos, lat, long)
        a.meses[m.value] = m
        return a

    def get_ano(self, ano):
        if self._verify_ano(ano):
            return self.anos[ano]
        return None

    def get_mes(self, ano, mes):
        if self._verify_mes(ano, mes):
            return self.anos[ano].meses[mes]
        return None

    def get_dia(self, ano, mes, dia):
        if self._verify_dia(ano, mes, dia):
            return self.anos[ano].meses[mes].dias[dia]
        return None

    def get_hora(self, ano, mes, dia, hora):
        if self._verify_hora(ano, mes, dia, hora):
            return self.anos[ano].meses[mes].dias[dia].horas[hora]
        return None

    def get_minutos(self, ano, mes, dia, hora, minutos):
        if self._verify_minutos(ano, mes, dia, hora, minutos):
            return self.anos[ano].meses[mes].dias[dia].horas[hora].localizacoes[minutos]
        return None

    def update_last_location(self, ano, mes, dia, hora, minutos, lat, long):
        data = datetime(int(ano), int(mes), int(dia), int(hora), int(minutos))
        self.ultima_localizacao = UltimaLocalizacao(id_usuario=self.id_usuario,
                                                    data=data, lat=lat, long=long)

    def add_location(self, ano, mes, dia, hora, minutos, lat, long):
        self.update_last_location(ano, mes, dia, hora, minutos, lat, long)
        # Walk down the year/month/day/hour maps, creating the first missing
        # level (and everything below it) when needed
        if self._verify_ano(ano):
            if self._verify_mes(ano, mes):
                if self._verify_dia(ano, mes, dia):
                    if self._verify_hora(ano, mes, dia, hora):
                        self.anos[ano].meses[mes].dias[dia].horas[hora] \
                            .localizacoes[minutos] = self._create_localizacao(minutos, lat, long)
                    else:
                        self.anos[ano].meses[mes].dias[dia].horas[hora] = \
                            self._create_hora(hora, minutos, lat, long)
                else:
                    self.anos[ano].meses[mes].dias[dia] = self._create_dia(
                        dia, hora, minutos, lat, long)
            else:
                self.anos[ano].meses[mes] = self._create_mes(
                    mes, dia, hora, minutos, lat, long)
        else:
            self.anos[ano] = self._create_ano(ano, mes, dia, hora, minutos, lat, long)

    def add_location_json(self, data):
        self.add_location(data['ano'], data['mes'], data['dia'], data['hora'],
                          data['minutos'], data['lat'], data['long'])
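# Usage sketch: locations are nested year -> month -> day -> hour -> minute,
# each level a MapField keyed by its string value (coordinates illustrative):
user_loc = UsuarioLocalizacao(id_usuario=42)
user_loc.add_location_json({'ano': '2024', 'mes': '06', 'dia': '01',
                            'hora': '13', 'minutos': '37',
                            'lat': -23.55, 'long': -46.63})
user_loc.get_minutos('2024', '06', '01', '13', '37')  # -> Localizacao
user_loc.save()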
class Resource(EmbeddedDocument):
    name = StringField(required=True)
    is_all = BooleanField(default=False)
    actions = MapField(StringField())