Code Example #1
File: models.py Project: 515783034/HHOfficeOrder
class OrderModel(db.Model):
    __tablename__ = 'order'

    __searchable__ = ['user_name', 'depart_name', 'reason']
    __analyzer__ = ChineseAnalyzer()

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # person who made the booking
    user_name = db.Column(db.String(100), nullable=False)
    depart_name = db.Column(db.String(100), nullable=False)

    # reason for the booking
    reason = db.Column(db.Text, nullable=False)

    office_id = db.Column(db.Integer, db.ForeignKey('office.id'))
    office = db.relationship('OfficeModel', backref=db.backref('orders'))

    # booking time range
    order_from = db.Column(db.Integer, nullable=False)
    order_to = db.Column(db.Integer, nullable=False)
    # timestamp of 00:00 on the booking day
    order_day = db.Column(db.Integer, nullable=False)

    def to_dict(self, **args):
        base_arg = {
            c.name: getattr(self, c.name, None)
            for c in self.__table__.columns
        }
        return dict(base_arg, **args)
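The __searchable__/__analyzer__ attributes on models like OrderModel are the convention used by flask-whooshalchemy-style extensions. A minimal usage sketch, assuming an existing Flask app, the db instance from the snippet, and the flask_whooshalchemy package (none of this wiring is shown in the original project):

# Hypothetical wiring, not taken from the project above.
import flask_whooshalchemy as whooshalchemy

whooshalchemy.whoosh_index(app, OrderModel)   # build/attach a whoosh index for the model

# ChineseAnalyzer segments the query, so Chinese keywords match the indexed fields.
results = OrderModel.query.whoosh_search('会议').all()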
Code Example #2
File: indexer.py Project: despacito008/tg_searcher
    def __init__(self, pickle_path='index', index_name='telegram_searcher', from_scratch=False):
        analyzer = ChineseAnalyzer()
        schema = Schema(
            content=TEXT(stored=True, analyzer=analyzer),
            url=ID(stored=True, unique=True),
            chat_id=STORED(),
            post_time=DATETIME(stored=True),
        )

        if not Path(pickle_path).exists():
            Path(pickle_path).mkdir()

        def _clear():
            pattern = re.compile(f'^_?{index_name}.*')
            for file in Path(pickle_path).iterdir():
                if pattern.match(file.name):
                    os.remove(str(file))
            self.ix = create_in(pickle_path, schema, index_name)

        if from_scratch:
            _clear()

        self.ix = open_dir(pickle_path, index_name) \
            if exists_in(pickle_path, index_name) \
            else create_in(pickle_path, schema, index_name)

        self._clear = _clear  # use a closure to avoid introducing too many members
        self.query_parser = QueryParser('content', schema)
        self.highlighter = highlight.Highlighter()
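A hedged sketch of how the objects built in this constructor (self.ix, self.query_parser, self.highlighter) might be used together; the method names add_msg and search are illustrative, not taken from the project:

    def add_msg(self, content, url, chat_id, post_time):
        # update_document replaces any existing document with the same unique url
        with self.ix.writer() as writer:
            writer.update_document(content=content, url=url,
                                   chat_id=chat_id, post_time=post_time)

    def search(self, text, limit=10):
        with self.ix.searcher() as searcher:
            q = self.query_parser.parse(text)
            return [(hit['url'], self.highlighter.highlight_hit(hit, 'content'))
                    for hit in searcher.search(q, limit=limit)]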
Code Example #3
class Post(db.Model):
    __tablename__ = 'posts'
    # fields to index for search
    __searchable__ = ['body']
    __analyzer__ = ChineseAnalyzer()

    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    postcategory_id = db.Column(db.Integer, db.ForeignKey('postcategorys.id'))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')

    # generate fake posts for testing
    @staticmethod
    def generate_fake(count=100):
        from random import seed, randint
        import forgery_py

        seed()
        user_count = User.query.count()
        for i in range(count):
            u = User.query.offset(randint(0, user_count - 1)).first()
            p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 3)),
                     timestamp=forgery_py.date.date(True),
                     author=u)
            db.session.add(p)
            db.session.commit()
Code Example #4
File: searcher.py Project: sam-rjl/CSND_Search
 def create_ix(self):
     analyzer = ChineseAnalyzer()
     schema = self.schema
     # create the directory that stores the index
     if not os.path.exists("index"):
         os.mkdir("index")
     # create a new index
     ix = create_in("index", schema, 'my_indexing')
     # fetch data from the database
     db = pymysql.connect(host, user, password, dbname)
     content = get_dbtext(db)
     # create a writer
     writer = ix.writer()
     # iterate over the database rows and insert documents
     count = 0
     for blog in content:
         # print(blog[0])
         writer.add_document(url=u'%s' % blog[0],
                             title=u'%s' % blog[1],
                             nickname=u'%s' % blog[2],
                             readcount=u'%s' % blog[4],
                             text=u'%s' % blog[6],
                             time=u'%s' % blog[5])
         count += 1
         print('Blog', count, 'added successfully...')
     writer.commit()
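A possible search counterpart to create_ix (a sketch only: the function name search_ix and the printed fields are assumptions; the index directory and name match the snippet above):

from whoosh.index import open_dir
from whoosh.qparser import QueryParser

def search_ix(keyword, limit=10):
    ix = open_dir("index", indexname='my_indexing')   # same location as in create_ix
    with ix.searcher() as searcher:
        query = QueryParser('text', ix.schema).parse(keyword)
        for hit in searcher.search(query, limit=limit):
            print(hit['title'], hit['url'])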
Code Example #5
class Post(db.Model):
    __tablename__ = 'posts'
    __searchable__ = ['title']
    __analyzer__ = ChineseAnalyzer()
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(64))
    cover = db.Column(db.String(64))
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    summary = db.Column(db.Text)
    publish = db.Column(db.Boolean, default=True, index=True)
    url_name = db.Column(db.String(64), index=True, unique=True)

    create_date = db.Column(db.DateTime, default=datetime.utcnow)
    publish_date = db.Column(db.DateTime, default=datetime.utcnow)
    update_date = db.Column(db.DateTime, default=datetime.utcnow)

    tags = db.relationship('Tag',
                           secondary=belong_to,
                           backref=db.backref('posts', lazy='dynamic'),
                           lazy='dynamic')

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        allowed_tags = [
            'a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i', 'li',
            'ol', 'pre', 'strong', 'ul', 'h1', 'h2', 'h3', 'p'
        ]
        target.body_html = bleach.linkify(
            bleach.clean(markdown(value, output_format='html'),
                         tags=allowed_tags,
                         strip=True))

    def __unicode__(self):
        return self.title
Code Example #6
File: models.py Project: BeanWei/wms
class Goods(db.Model):
    '''Goods model'''
    __tablename__ = 'goods'
    __searchable__ = ['title']  # fields to index for search
    __analyzer__ = ChineseAnalyzer()  # use Chinese word segmentation

    id = db.Column(db.Integer, index=True, primary_key=True)
    title = db.Column(db.Text)
    price = db.Column(db.Float, index=True)
    stock = db.Column(db.Integer, index=True)
    storage_time = db.Column(db.DateTime, default=datetime.utcnow, index=True)
    storage_location = db.Column(db.String(225), index=True)

    meta = {'ordering': ['-storage_time']}

    def to_json(self):
        '''Return the goods information'''
        return {
            'id': self.id,
            'title': self.title,
            'price': self.price,
            'stock': self.stock,
            'storage_time': self.storage_time,
            'storage_location': self.storage_location
        }
Code Example #7
class IndexMsg:
    schema = Schema(
        content=TEXT(stored=True, analyzer=ChineseAnalyzer()),
        url=ID(stored=True, unique=True),
        # for `chat_id` we use TEXT instead of NUMERIC here, because NUMERIC
        # does not support iterating over all values of the field
        chat_id=TEXT(stored=True),
        post_time=DATETIME(stored=True, sortable=True),
        sender=TEXT(stored=True),
    )

    def __init__(self, content: str, url: str, chat_id: Union[int, str],
                 post_time: datetime, sender: str):
        self.content = content
        self.url = url
        self.chat_id = int(chat_id)
        self.post_time = post_time
        self.sender = sender

    def as_dict(self):
        return {
            'content': self.content,
            'url': self.url,
            'chat_id': str(self.chat_id),
            'post_time': self.post_time,
            'sender': self.sender
        }

    def __str__(self):
        return 'IndexMsg(' + ', '.join(f'{k}={v!r}'
                                       for k, v in self.as_dict().items()) + ')'
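A small helper sketch for turning a whoosh search hit back into an IndexMsg; it relies only on the stored fields declared in the schema above (the function name is made up):

def msg_from_hit(hit) -> 'IndexMsg':
    # every field read here is declared stored=True in IndexMsg.schema
    return IndexMsg(content=hit['content'], url=hit['url'],
                    chat_id=hit['chat_id'], post_time=hit['post_time'],
                    sender=hit['sender'])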
Code Example #8
File: searcher.py Project: sam-rjl/CSND_Search
 def __init__(self):
     self.schema = Schema(url=ID(stored=True),
                          title=TEXT(stored=True),
                          nickname=TEXT(stored=True),
                          readcount=TEXT(stored=True),
                          text=TEXT(stored=True,
                                    analyzer=ChineseAnalyzer()),
                          time=DATETIME(stored=True))
Code Example #9
class Area(db.Model):
    __tablename__ = 'area'
    __searchable__ = ['name']
    __analyzer__ = ChineseAnalyzer()
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(256), nullable=False, unique=True, index=True)
    number = db.Column(db.Integer, default=0)
    create_time = db.Column(db.DateTime, default=datetime.now)
Code Example #10
def index():

    '''
    Index the danmaku (bullet-comment) files; if lxml raises a parse error,
    the offending file is skipped and not indexed.
    '''
    
    f_list = os.listdir(XML_DIR)
    schema = Schema(path=ID(stored=True),
                    content=TEXT(analyzer=ChineseAnalyzer(), stored=True),
                    radio=NUMERIC(float, stored=True))
    new_or_not = 0
    if not os.path.exists(INDEX_DIR):
        os.mkdir(INDEX_DIR)
        new_or_not = 1

    # if new_or_not:
    #     ix = create_in(INDEX_DIR, schema)
    # else:
    #     ix = open_dir(INDEX_DIR)

    ix = create_in(INDEX_DIR, schema)

    writer = ix.writer()

    filter_words = load_all_words()

    num = 0

    for fname in f_list:
        if fname.find(".xml")==-1:continue
        filename = os.path.join(XML_DIR, fname)
        with codecs.open(filename, 'r', 'utf8') as f:
            content = f.read()
            try:
                node = etree.XML(content.encode('utf8'))
                danmu_xpath = "//d/text()"
                text_list = []
                max_limit_xpath = "//maxlimit/text()"
                max_limit = node.xpath(max_limit_xpath)
                assert len(max_limit) == 1, 'max_limit is wrong ' + fname
                max_limit = max_limit[0]
                for danmu in node.xpath(danmu_xpath):
                    sentence = danmu.strip()
                    if len(sentence) > 0:
                        text_list.append(sentence)        
                if len(text_list) > 0:
                    text_value = u' \n '.join(text_list)
                    radio = len(text_list) * 1.0 / int(max_limit)
                    # os.listdir() already returns str, so no .decode() is needed
                    writer.add_document(path=fname,
                                        content=text_value,
                                        radio=radio)
                num = num + 1
            except etree.XMLSyntaxError as e:
                print(filename, e)
            except Exception as e:
                print(e)
Code Example #11
class PostingsModel(db.Model):
    __tablename__ = 'posting'
    __searchable__ = ['title', 'content']
    __analyzer__ = ChineseAnalyzer()
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    title = db.Column(db.String(50), nullable=False)
    content = db.Column(db.Text, nullable=False)
    create_time = db.Column(db.DateTime, default=datetime.now)
    board_id = db.Column(db.Integer, db.ForeignKey('board.id'))
Code Example #12
class Article(db.Model):
    __tablename__ = 'articles'
    __searchable__ = ['title', 'body']
    __analyzer__ = ChineseAnalyzer()
    id = db.Column(db.Integer, primary_key=True)
    number = db.Column(db.Integer, default=random_number, unique=True)
    title = db.Column(db.String(200), default='')
    body = db.Column(LONGTEXT, default='')
    body_html = db.Column(LONGTEXT)
    body_abstract = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    author = db.relationship('User', backref='articles')
    repository_id = db.Column(db.String(36))
    status = db.Column(db.String(200), default='')
    version_remark = db.Column(db.TEXT, default='')
    version_timestamp = db.Column(db.DateTime,
                                  index=True,
                                  default=datetime.utcnow)

    @staticmethod
    def query_published():
        return Article.query.filter_by(status='published')

    def to_json(self, level='brief'):
        json = {
            'id': self.id,
            'number': self.number,
            'title': self.title,
            'timestamp': self.timestamp.replace(tzinfo=timezone.utc).isoformat(),
            'author': self.author.to_json(level)
        }
        json['plugin'] = Signal.send('to_json', article=self, level=level)
        if level.startswith('admin_'):
            json['repositoryId'] = self.repository_id
            json['status'] = self.status
            json['versionTimestamp'] = self.version_timestamp
            if level == 'admin_brief':
                return json
            json['bodyAbstract'] = self.body_abstract
            if level == 'admin_basic':
                return json
            json['body'] = self.body
            if level == 'admin_full':
                return json
        else:
            if level == 'brief':
                return json
            json['bodyAbstract'] = self.body_abstract
            if level == 'basic':
                return json
            json['body'] = self.body
            json['bodyHtml'] = self.body_html
            if level == 'full':
                return json
Code Example #13
class BlogingPost(db.Model):
    __tablename__ = 'blogingpost2'
    __searchable__ = ['title', 'content']  # these fields will be indexed by whoosh
    # __analyzer__ = SimpleAnalyzer()     # configure analyzer; defaults to
    __analyzer__ = ChineseAnalyzer()      # StemmingAnalyzer if not specified

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100))  # Indexed fields are either String,
    content = db.Column(db.String(15000))   # Unicode, or Text
    datime = db.Column(db.String(20))
    url = db.Column(db.String(100))
Code Example #14
class Todo(db.Document):
    meta = {
        'collection': 'data_link',
        'ordering': ['-create_at'],
        'strict': False,
    }
    __searchable__ = ['file_name']
    __analyzer__ = ChineseAnalyzer()

    file_name = db.ListField()
    src = db.ListField()
Code Example #15
class Lab_Form(db.Model):
    __tablename__ = 'info_lab'
    __analyzer__ = ChineseAnalyzer()

    id = db.Column(db.INTEGER, primary_key=True)
    lab_name = db.Column(db.TEXT)
    lab_school = db.Column(db.TEXT)
    lab_introduction = db.Column(db.TEXT)
    lab_location = db.Column(db.TEXT)
    lab_postcode = db.Column(db.TEXT)
    lab_supportunit = db.Column(db.TEXT)
    lab_tel = db.Column(db.TEXT)
    lab_fax = db.Column(db.TEXT)
    lab_mail = db.Column(db.TEXT)
    lab_url = db.Column(db.TEXT)
    lab_director = db.Column(db.TEXT)
    lab_contactor = db.Column(db.TEXT)

    def __repr__(self):
        return '<Lab_Form {}>'.format(self.lab_name, self.lab_school,
                                      self.lab_introduction, self.lab_location,
                                      self.lab_postcode, self.lab_supportunit,
                                      self.lab_director, self.lab_contactor)

    def get_info(self, name):
        """
        根据名称查询数据库
        :param name: 实验室名称
        :return: 实验室基本信息
        """
        info = Lab_Form.query.filter_by(lab_name=name).first()
        return info

    def select_info(self):
        """
        获取第page页数据
        :param page: 页数
        :return: 分页后的数据
        """
        try:
            info = Lab_Form.query
            return info
        except IOError:
            return None

    def search_box(self, name):
        """
        根据搜索框对实验室模糊搜索
        :param name: 搜索框中输入的不完全实验室名称
        :return: 通过模糊搜索之后得到结果的陈列
        """
        info = Lab_Form.query.filter(
            or_(Lab_Form.lab_name.like('%' + name + '%')))
        return info
Code Example #16
File: searcher.py Project: xchsp/searchenginedemo
    def create_ix(self):
        analyzer = ChineseAnalyzer()
        schema = self.schema
        # create the directory that stores the index
        if not os.path.exists("index"):
            os.mkdir("index")
        # create a new index
        ix = create_in("index", schema, 'my_indexing')
        # fetch data from the database
        db = pymysql.connect(host, user, password, dbname)
        # content = get_dbtext(db)
        # create a writer
        writer = ix.writer()
        # iterate over the database rows and insert documents
        count = 0
        conn = sqlite3.connect('snandy.db')
        # create a cursor:
        cursor = conn.cursor()
        # execute the query:
        cursor.execute('select * from au_layernode')
        # use fetchall() to get the result set (a list)
        values = cursor.fetchall()
        for v in values:
            soup = BeautifulSoup(v[4], 'html.parser')

            writer.add_document(url=u'%s' % v[2],
                                title=u'%s' % v[3],
                                nickname=u'snandy',
                                readcount=u'%s' % len(v[4]),
                                text=u'%s' % soup.get_text(),
                                time=u'%s' %
                                v[9].split(' ')[0].replace('-', ''))
            count += 1
            print('Blog', count, 'added successfully...')

        # print(values) #result:[('1', 'Michael')]
        # close the cursor
        # close the connection
        cursor.close()
        conn.close()
        # blog = ['','baidu','百度','百度','5','百度','21090523']
        # for i in range(10):
        #     writer.add_document(url=u'%s' % blog[1], title=u'%s' % blog[2], nickname=u'%s' % blog[3],
        #                         readcount=u'%s' % blog[4], text=u'%s' % blog[5], time=u'%s' % blog[6])
        #     count += 1
        #     print('第', count, '篇blog添加成功...')
        # for blog in content:
        #     writer.add_document(url=u'%s' % blog[1], title=u'%s' % blog[2], nickname=u'%s' % blog[3],
        #                         readcount=u'%s' % blog[4], text=u'%s' % blog[5], time=u'%s' % blog[6])
        #     count += 1
        #     print('第', count, '篇blog添加成功...')

        writer.commit()
Code Example #17
class Essay(db.Model):
    """文章"""
    __tablename__ = 'essay'
    __searchable__ = ['essay_title', 'essay_content']
    __analyzer__ = ChineseAnalyzer()
    id = db.Column(db.Integer, primary_key=True)  # id
    essay_title = db.Column(db.String(128), unique=True,
                            index=True)  # article title, unique, indexed
    essay_content = db.Column(db.Text())  # article content
    essay_cls = db.Column(db.String(32))  # article category
    essay_push_time = db.Column(db.String(64))  # publish time
    essay_push_user = db.Column(db.String(32))  # publisher
    essay_scan = db.Column(db.Integer, default=0)  # view count
Code Example #18
class Post(db.Model):
    __tablename__ = 'Post'
    __searchable__ = ['theme', 'content']
    __analyzer__ = ChineseAnalyzer()
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    theme = db.Column(db.String(150), nullable=False, unique=True,
                      index=True)  # topic/title of the post
    content = db.Column(db.Text, nullable=False)  # content of the post
    create_time = db.Column(db.DateTime, default=datetime.now)
    cnumber = db.Column(db.Integer, default=0)
    area_id = db.Column(db.Integer, db.ForeignKey("area.id"))  # foreign key
    author_id = db.Column(db.String(64), db.ForeignKey("front_user.id"))
    author = db.relationship('FrontUser', backref='posts')
    area = db.relationship(
        'Area', backref='posts')  # bidirectional relationship between Area and Post, so area.posts lists the posts under that board
Code Example #19
File: database.py Project: inorilzy/flask_blog
class Article(db.Model):
    __tablename__ = 'article'
    __searchable__ = ['content', 'title']
    __analyzer__ = ChineseAnalyzer()

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    author = db.Column(db.String(32))
    title = db.Column(db.String(32))
    content = db.Column(db.Text)
    tag = db.relationship('Tag',
                          secondary='article_to_tag',
                          backref=db.backref('articles'),
                          lazy='dynamic')
    classify_id = db.Column(db.INTEGER, db.ForeignKey('classify.id'))
    classify = db.relationship('Classify', backref=db.backref('articles'))
Code Example #20
File: leaf_models.py Project: rjguanwen/Ginkgo
class Post(db.Model):
    # searchable fields; separate multiple fields with commas
    __searchable__ = ['body']
    # use Chinese word segmentation
    __analyzer__ = ChineseAnalyzer()

    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.String(140))
    timestamp = db.Column(db.DateTime)
    user_name = db.Column(db.String(32), db.ForeignKey('user.user_name'))
    # language the post is written in
    language = db.Column(db.String(5))

    def __repr__(self):
        return '<Post %r>' % self.body
Code Example #21
class ComInfor(db.Model):
    __tablename__ = 'ComInfor'
    __searchable__ = ['comName']
    __analyzer__ = ChineseAnalyzer()

    comId = db.Column(db.Integer, primary_key=True)
    comName = db.Column(db.String(50))
    comBrief = db.Column(db.String(500))
    comCity = db.Column(db.String(20))
    comAddress = db.Column(db.String(100))
    comUrl = db.Column(db.String(100), default="暂无")
    comMon = db.Column(db.String(20))
    comProject = db.Column(db.String(250))
    comStaff = db.Column(db.String(20))
    comContact = db.Column(db.String(20))
    comPhone = db.Column(db.String(20))
    comEmail = db.Column(db.String(50))
    comFax = db.Column(db.String(20))
    comDate = db.Column(db.DATETIME, default=datetime.now)
    students = db.Column(db.Integer, default=0)
    comCheck = db.Column(db.Integer, default=0)
    internshipinfor = db.relationship('InternshipInfor', backref='cominfor', lazy='dynamic')

    # generate a large batch of fake records
    @staticmethod
    def generate_fake(count=100):
        from sqlalchemy.exc import IntegrityError
        from random import seed, randint, choice
        import forgery_py

        seed()
        for i in range(count):
            comInfor = ComInfor(comName=forgery_py.internet.user_name(True),
                                comBrief=forgery_py.lorem_ipsum.sentences(),
                                comAddress=forgery_py.address.city(), comUrl=forgery_py.internet.domain_name(),
                                comMon=randint(100, 10000), comProject=forgery_py.lorem_ipsum.word(),
                                comStaff=randint(100, 10000),
                                comContact=forgery_py.name.full_name(), comPhone=forgery_py.address.phone(),
                                comEmail=forgery_py.internet.email_address(user=None),
                                comFax=forgery_py.address.phone())
            db.session.add(comInfor)
            try:
                db.session.commit()
            except IntegrityError:
                db.session.rollback()

    def __str__(self):
        return self.comName
Code Example #22
class Blog(db.Model):
    __tablename__ = 'blogs'
    __searchable__ = ['title', 'content']
    __analyzer__ = ChineseAnalyzer()

    id = db.Column(db.Integer, primary_key=True)
    time = db.Column(db.BigInteger)
    author = db.Column(db.String(255))
    title = db.Column(db.String(255), unique=True)
    content = db.Column(LONGTEXT)

    comments = db.relationship('Comment', backref='blogs', lazy='dynamic')
    tags = db.relationship('Tag', secondary=tags, backref=db.backref('blogs', lazy='dynamic'))

    def __init__(self, author, title, content):
        self.author = author
        self.title = title
        self.content = content

    def __repr__(self):
        return "<Blog '{}'>".format(self.title)

    def save(self):
        db.session.add(self)
        db.session.commit()

    def delete(self):
        db.session.delete(self)
        db.session.commit()

    def to_dict(self):
        """Transforms a model into a dictionary which can be dumped to JSON."""
        # first we get the names of all the columns on your model
        columns = [c.key for c in class_mapper(self.__class__).columns]
        # then we return their values in a dict
        return dict((c, getattr(self, c)) for c in columns)

    # return all columns except content and comments
    @staticmethod
    def query_title():
        sql_str = '''
        SELECT blogs.id, blogs.time, blogs.author, blogs.title, group_concat(tags.title) tag
        FROM blogs LEFT JOIN blog_tags ON blogs.id=blog_tags.blog_id
        left join tags ON tags.id=blog_tags.tag_id
        group by blogs.id;
        '''
        ret = db.engine.execute(sql_str).fetchall()
        return [dict(r) for r in ret]
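The raw SQL in query_title could also be written with SQLAlchemy constructs. A hedged equivalent, assuming the association table bound to secondary=tags exposes blog_id/tag_id columns as the SQL implies (illustrative, not part of the original project):

    @staticmethod
    def query_title_orm():
        from sqlalchemy import func
        # same result shape as query_title: blog columns plus a concatenated tag list
        rows = (db.session.query(Blog.id, Blog.time, Blog.author, Blog.title,
                                 func.group_concat(Tag.title).label('tag'))
                .outerjoin(tags, tags.c.blog_id == Blog.id)
                .outerjoin(Tag, Tag.id == tags.c.tag_id)
                .group_by(Blog.id)
                .all())
        return [row._asdict() for row in rows]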
Code Example #23
File: post_model.py Project: ehoac/noko
class Post(db.Model):
    __searchable__ = ['title', 'body']
    __analyzer__ = ChineseAnalyzer()

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(200))
    body = db.Column(db.Text)
    open = db.Column(db.String(1))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')
    timestamp = db.Column(db.DateTime)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    type_code = db.Column(db.String, db.ForeignKey('post_type.code'))

    def __repr__(self):
        return '<Post %r %r %r %r>' % (self.title, self.body, self.type,
                                       self.open)
Code Example #24
class Goods(db.Model):
    __tablename__ = 'goods'
    __searchable__ = ['product_id', 'name', 'storage_location']
    __analyzer__ = ChineseAnalyzer()

    product_id = db.Column(db.Integer, index=True, primary_key=True)
    name = db.Column(db.String)
    price = db.Column(db.Float)
    weight = db.Column(db.Float)
    norms = db.Column(db.String)
    residue_num = db.Column(db.Integer)
    sale_total = db.Column(db.Integer)
    storage_time = db.Column(db.DateTime, default=datetime.utcnow)
    storage_location = db.Column(db.String)

    meta = {'ordering': ['-storage_time']}
Code Example #25
class Post(db.Model):
    __tablename__ = 'posts'
    __searchable__ = ['title']  # enable search; index this field
    __analyzer__ = ChineseAnalyzer()  # Chinese word segmentation
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(64))
    cover = db.Column(db.String(64))
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    summary = db.Column(db.Text)
    publish = db.Column(db.Boolean, default=True, index=True)
    url_name = db.Column(db.String(64), index=True, unique=True)

    create_date = db.Column(db.DateTime, default=datetime.utcnow)
    publish_date = db.Column(db.DateTime, default=datetime.utcnow)
    update_date = db.Column(db.DateTime, default=datetime.utcnow)

    tags = db.relationship('Tag',
                           secondary=belong_to,
                           backref=db.backref('posts', lazy='dynamic'),
                           lazy='dynamic')

    # @staticmethod
    # def generate_fake(count=20):
    #     from random import seed, randint
    #     import forgery_py
    #
    #     seed()
    #     for i in range(count):
    #         p = Post(title=forgery_py.internet.user_name(True),
    #                  body=forgery_py.lorem_ipsum.sentences(randint(1, 3)),
    #                  create_date=forgery_py.date.date(True),
    #                  url_name=forgery_py.internet.user_name(True)
    #                  )
    #         db.session.add(p)
    #         db.session.commit()

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """把body字段中的文本渲染成HTML格式,保存在body_html"""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
                        'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
                        'h1', 'h2', 'h3', 'p']  # whitelist of allowed tags
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))#.replace('<pre>', '<pre class="language-python">').\
Code Example #26
class Sheet_Form(db.Model):
    __tablename__ = 'sf1'
    __searchable__ = ['company', 'address']
    __analyzer__ = ChineseAnalyzer()

    id = db.Column(db.INTEGER, primary_key=True)
    company = db.Column(db.TEXT)
    url = db.Column(db.TEXT)
    tel = db.Column(db.TEXT)
    fax = db.Column(db.TEXT)
    mail = db.Column(db.TEXT)
    contacts = db.Column(db.TEXT)
    address = db.Column(db.TEXT)
    remarks = db.Column(db.TEXT)

    def __repr__(self):
        return '<Sheet_Form {}>'.format(self.company)
Code Example #27
 def build_index(self):
     index_config = self.config.index_dict
     analyzer = ChineseAnalyzer()
     schema = Schema(title=TEXT(stored=True),
                     path=ID(stored=True),
                     content=TEXT(stored=True, analyzer=analyzer))
     for file_name, content in self.files_dict.items():  # content: [[question], [answer]]
         index_path = index_config[file_name]
         if not os.path.exists(index_path):
             os.mkdir(index_path)
         tmp_index = create_in(index_path, schema)
         writer = tmp_index.writer()
         for i in range(len(content)):
             writer.add_document(title=content[i][1].strip(),
                                 path="/{}".format(str(i)),
                                 content=content[i][0].strip())
         writer.commit()
Code Example #28
File: models.py Project: huang-ju-git/BDIC-forum
class Question(db.Model):
    __tablename__ = 'question'
    __searchable__ = ['content', 'title']
    __analyzer__ = ChineseAnalyzer()
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    type = db.Column(db.Integer, nullable=False)  # a number stands in for the concrete category
    title = db.Column(db.String(100), nullable=False)
    content = db.Column(db.Text, nullable=False)
    create_time = db.Column(db.DateTime, default=datetime.now)
    author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    report_total = db.Column(db.Integer, nullable=True)
    report_reasons_and_times = db.Column(db.String(100), nullable=True)

    # answers = db.relationship('Answer',lazy='dynamic', cascade='all, delete-orphan',passive_deletes=True, backref=db.backref('question'))
    author = db.relationship('User', backref=db.backref('questions'))

    def __repr__(self):
        return '{0}(title={1})'.format(self.__class__.__name__, self.title)
Code Example #29
class Community(db.Model):
    """
    社区帖子模型:
    title:帖子的标题
    context:帖子的内容
    create_time:帖子创建时间(datetime.now:每一次创建模型都会记录时间,datetime.now():第一次创建模型时记录时间)
    author_id:作者id,与User表的id相关联
    """
    __tablename__ = 'community'
    __searchable__ = ['title']
    __analyzer__ = ChineseAnalyzer()
    id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    title = db.Column(db.String(50), nullable=False)
    context = db.Column(db.Text, nullable=False)
    create_time = db.Column(db.DateTime, default=datetime.now)
    author_id = db.Column(db.Integer, db.ForeignKey('user.id'))

    author = db.relationship('User', backref=db.backref('community'))
Code Example #30
class Comment(db.Model):
    __searchable__ = ['body']
    __analyzer__ = ChineseAnalyzer()
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    disabled = db.Column(db.Boolean)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        allowed_tags = [
            'a', 'abbr', 'acronym', 'b', 'code', 'em', 'i', 'strong'
        ]
        target.body_html = bleach.linkify(
            bleach.clean(markdown(value, output_format='html'),
                         tags=allowed_tags,
                         strip=True))

    def to_json(self):
        json_comment = {
            'url': url_for('api.get_comment', id=self.id, _external=True),
            'body': self.body,
            'body_html': self.body_html,
            'timestamp': self.timestamp,
            'disabled': self.disabled,
            'author': url_for('api.get_user',
                              id=self.author_id,
                              _external=True),
            'post': url_for('api.get_post', id=self.post_id, _external=True)
        }
        return json_comment

    @staticmethod
    def from_json(json_comment):
        body = json_comment.get('body')
        if body is None or body == '':
            raise ValidationError('comment does not have a body')
        return Comment(body=body)
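Models such as Comment (and the Post classes earlier) that define on_changed_body conventionally register it as an SQLAlchemy 'set' event listener so body_html stays in sync with body; the registration is not shown in these snippets, but it typically looks like this (assumed, not confirmed by the source):

# Conventional registration (assumed): re-render body_html whenever body is assigned.
db.event.listen(Comment.body, 'set', Comment.on_changed_body)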