class Post(db.Document):
    """A blog post document.

    Identified by a unique ``slug``; comments and rates are stored as
    free-form lists (element type not constrained here).
    """

    # Unique URL-friendly identifier, capped at 20 characters.
    slug = db.StringField(required=True, max_length=20, unique=True)
    # Username of the post's author (not a reference document).
    creator = db.StringField(required=True, max_length=20)
    title = db.StringField(required=True, max_length=50)
    # No default: callers are responsible for setting the timestamp.
    created_at = db.DateTimeField()
    comments = db.ListField()
    rates = db.ListField()
class Spot(db.Document):
    """A geolocated work spot (wifi/power rating) created by a user.

    Newest spots sort first (see ``meta['ordering']``).
    """

    name = db.StringField(required=True)
    user = db.ReferenceField(User)
    # FIX(consistency): use db.GeoPointField like the other Spot model in
    # this file — the bare GeoPointField name is not referenced via the
    # ``db`` handle anywhere else and would need a separate direct import.
    # NOTE(review): confirm GeoPointField was not deliberately imported
    # directly in this module's header.
    location = db.GeoPointField(required=True)
    # Wifi quality on a 0-5 scale.
    wifi = db.IntField(min_value=0, max_value=5)
    # Whether power outlets are available.
    power = db.BooleanField()
    category = db.StringField()
    comments = db.ListField(db.StringField())
    # Callable default: evaluated at save time, not at class-definition time.
    creation_date = db.DateTimeField(default=datetime.datetime.utcnow)
    user_url = db.StringField()

    meta = {
        'ordering': ['-creation_date'],
    }

    def __str__(self):
        return self.name

    def to_dict(self):
        """Return a JSON-friendly dict of this spot.

        Converts the Mongo ``_id`` to a string ``id`` key, the user
        reference to its string id, and the creation date to a Unix
        timestamp string.
        """
        data = self.to_mongo()
        data['creation_date'] = str(self.creation_date.timestamp())
        data['id'] = str(self.id)
        del data['_id']
        data['user'] = str(data['user'])
        return data

    def to_json(self):
        """Serialize :meth:`to_dict` as a JSON string."""
        return json.dumps(self.to_dict())
class CourseApplication(db.EmbeddedDocument):
    """An application to a course."""

    course = db.ReferenceField(Course)
    # Files the applicant uploaded in support of the application.
    uploaded_content = db.ListField(db.FileField())
    average_okay = db.BooleanField(default=False)
    have_met_reqs = db.BooleanField(default=False)
    # One of: pending, yes, maybe, no, nosampleprovided
    writing_sample_status = db.StringField(default='pending')
    # One of: pending, approved, rejected
    status = db.StringField(default='pending')
class Spot(db.Document):
    """A geolocated work spot, newest first by creation date."""

    name = db.StringField(required=True)
    user = db.ReferenceField(User)
    location = db.GeoPointField(required=True)
    # Wifi quality rated 0-5.
    wifi = db.IntField(min_value=0, max_value=5)
    power = db.BooleanField()
    category = db.StringField()
    comments = db.ListField(db.StringField())
    # Callable default — evaluated per-document at save time.
    creation_date = db.DateTimeField(default=datetime.datetime.utcnow)

    meta = {'ordering': ['-creation_date']}

    def __str__(self):
        return self.name
class Article(db.Document):
    """A publishable article with locally-stored images.

    On save, base64-embedded images are extracted to disk and replaced by
    URLs, the title is stripped of markup, the content is sanitized, and
    the language is auto-detected. Articles containing a steemit.com link
    morph into a :class:`SteemitArticle`.
    """

    title = db.StringField(required=True)
    content = db.StringField(required=True)
    creation_date = db.DateTimeField(default=datetime.datetime.utcnow)
    slug = db.StringField(required=True, default='no-title')
    # Keep the article if its author is deleted; author becomes null.
    author = db.ReferenceField(User, reverse_delete_rule='NULLIFY')
    # ISO 639-1 two-letter language code, auto-detected on save.
    language = db.StringField(min_length=2, max_length=2, default='en')
    images = db.ListField()
    publication_date = db.DateTimeField()
    published = db.BooleanField(default=False)

    def __str__(self):
        return str(self.title)

    def get_author(self):
        """Return the author, falling back to the 'neomad' user when the
        referenced author no longer exists."""
        try:
            return self.author
        except DoesNotExist:
            return User.objects.get(slug='neomad')

    def extract_images(self):
        """
        Extract images from the content, resize and save them locally
        if they are base64 encoded.
        Saves the list of images into the images list property and
        removes previously saved images that are no longer referenced.
        """
        html = BeautifulSoup(self.content, 'html.parser')
        images = []
        # Ensure the per-article image directory exists (requires self.id).
        try:
            os.makedirs(self.get_images_path())
        except FileExistsError:
            pass
        for img in html.find_all('img'):
            data = img.get('src')
            if is_base64(data):
                # Content-addressed filename: identical data maps to the
                # same file, so re-saving is idempotent.
                m = hashlib.md5()
                m.update(data.encode('utf-8'))
                img_name = m.hexdigest()
                img_path = '{}/{}'.format(self.get_images_path(), img_name)
                img_url = '{}/{}'.format(self.get_images_url(), img_name)
                save_base64_image(data, img_path, (1000, 800))
                img['src'] = img_url
                images.append(img_url)
            else:
                images.append(data)
        # Delete files for images that were dropped from the content.
        for outdated_image in set(self.images) - set(images):
            os.remove(
                os.path.join(self.get_images_path(),
                             os.path.basename(outdated_image)))
        self.images = images
        # FIX: persist the rewritten markup — without this, the img src
        # substitutions above were discarded and base64 payloads stayed
        # in content (the other Article model in this file does this).
        self.content = str(html)

    def get_images_path(self):
        """Filesystem directory holding this article's images."""
        return '{}/{}'.format(app.config['ARTICLE_IMG_PATH'], self.id)

    def get_images_url(self):
        """Public base URL for this article's images."""
        return '{}/{}'.format(app.config['ARTICLE_IMG_URL'], self.id)

    @property
    def image(self):
        """First image of the article, or None when there is none."""
        if len(self.images):
            return self.images[0]

    def delete(self, *args, **kwargs):
        """Delete the document and its on-disk image directory."""
        parent = super(Article, self).delete(*args, **kwargs)
        path = self.get_images_path()
        shutil.rmtree(path)
        return parent

    def save(self, *args, **kwargs):
        self.slug = slugify(self.title)
        is_new = not self.id
        # when new, the id must exist before extracting images
        if is_new:
            super(Article, self).save(*args, **kwargs)
        self.extract_images()
        self.title = Markup(self.title).striptags()
        self.content = clean_html(self.content, ALLOWED_TAGS)
        self.language = detect(self.content)
        # Morph into a SteemitArticle when a steemit link is present.
        morph = self.morph()
        if self != morph:
            morph.pre_save(*args, **kwargs)
        return super(Article, self).save(*args, **kwargs)

    def morph(self):
        """Return a SteemitArticle wrapper when the content links to
        steemit.com; otherwise return self unchanged."""
        if (self.content and 'https://steemit.com/' in self.content):
            match = re.match('.*(https://steemit.com/[^<\s.]*)', self.content)
            url = match.groups()[0]
            return SteemitArticle(article=self, url=url)
        return self

    meta = {
        'ordering': ['-publication_date'],
        'indexes': ['-publication_date'],
    }
class Article(db.Document):
    """An article whose embedded base64 images are extracted to local
    files on save, with content sanitized and language auto-detected."""

    title = db.StringField(required=True)
    content = db.StringField(required=True)
    # Callable default; save() also backfills it when missing.
    creation_date = db.DateTimeField(default=datetime.datetime.utcnow)
    slug = db.StringField(required=True, default='no-title')
    author = db.ReferenceField(User)
    # ISO 639-1 two-letter code, overwritten by detect() on save.
    language = db.StringField(min_length=2, max_length=2, default='en')
    images = db.ListField()

    def __str__(self):
        return str(self.title)

    def extract_images(self):
        """
        Extract images from the content, resize and save them locally
        if they are base64 encoded.
        Saves the list of images into the images list property.
        """
        html = BeautifulSoup(self.content, 'html.parser')
        images = []
        # Create the per-article image directory; needs self.id, so
        # save() persists a new document before calling this.
        try:
            os.makedirs(self.get_images_path())
        except FileExistsError:
            pass
        for img in html.find_all('img'):
            data = img.get('src')
            if is_base64(data):
                # Content-addressed filename: same payload, same file.
                m = hashlib.md5()
                m.update(data.encode('utf-8'))
                img_name = m.hexdigest()
                img_path = '{}/{}'.format(self.get_images_path(), img_name)
                img_url = '{}/{}'.format(self.get_images_url(), img_name)
                save_base64_image(data, img_path, (1000, 800))
                # Replace the inline payload with the saved file's URL.
                img['src'] = img_url
                images.append(img_url)
            else:
                images.append(data)
        self.images = images
        # Write the rewritten markup back into the stored content.
        self.content = str(html)

    def get_images_path(self):
        # Filesystem directory for this article's images.
        return '{}/{}'.format(app.config['ARTICLE_IMG_PATH'], self.id)

    def get_images_url(self):
        # Public base URL for this article's images.
        return '{}/{}'.format(app.config['ARTICLE_IMG_URL'], self.id)

    @property
    def image(self):
        # First image, or None when the article has none.
        if len(self.images):
            return self.images[0]

    def delete(self, *args, **kwargs):
        # Remove the document, then its on-disk image directory.
        # NOTE(review): rmtree raises FileNotFoundError if the directory
        # was never created (article saved without images?) — confirm.
        parent = super(Article, self).delete(*args, **kwargs)
        path = self.get_images_path()
        shutil.rmtree(path)
        return parent

    def save(self, *args, **kwargs):
        if not self.creation_date:
            self.creation_date = datetime.datetime.utcnow()
        self.slug = slugify(self.title)
        is_new = not self.id
        # when new, the id must exist before extracting images
        if is_new:
            super(Article, self).save(*args, **kwargs)
        self.extract_images()
        # Strip markup from the title and sanitize the content before
        # the final persist; language is detected from the cleaned text.
        self.title = Markup(self.title).striptags()
        self.content = clean_html(self.content, ALLOWED_TAGS)
        self.language = detect(self.content)
        return super(Article, self).save(*args, **kwargs)

    meta = {'ordering': ['-creation_date']}