Example #1
def populate():
    print('drop database')
    db.drop_all()
    print('drop done')
    print('create database')
    db.create_all()
    print('create database done')
    print('creating......')
    for user in users:
        hash_password = bcrypt.generate_password_hash(
            user.get('password')).decode('utf-8')
        create_user = User(username=user.get('username'),
                           email=user.get('email'),
                           password=hash_password)
        account = Account()
        create_user.account = account
        db.session.add(create_user)
        db.session.commit()
        print('create user success')
    print('create users done')
    admin = User.query.filter_by(username='******').first()
    for post in posts:
        create_post = Post(title=post, content=post * 3, author=admin)
        db.session.add(create_post)
        db.session.commit()
        print('create post success')
    print('done')
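Example #1 presumably lives in a Flask project where db, bcrypt, users, and posts are defined elsewhere. As a hedged sketch (not the original project's code), one common way to expose such a drop/create/seed routine is as a Flask CLI command; the app setup, the User model, and the sample users list below are illustrative assumptions.

# Minimal sketch, assuming Flask, Flask-SQLAlchemy and Flask-Bcrypt; the model
# and the sample data are placeholders, not taken from the original snippet.
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///blog.db'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    password = db.Column(db.String(128), nullable=False)

users = [{'username': 'alice', 'email': 'alice@example.com', 'password': 'secret'}]

@app.cli.command('seed-db')
def seed_db():
    """Drop, recreate and seed the database; run with `flask seed-db`."""
    db.drop_all()
    db.create_all()
    for user in users:
        hashed = bcrypt.generate_password_hash(user['password']).decode('utf-8')
        db.session.add(User(username=user['username'],
                            email=user['email'],
                            password=hashed))
    db.session.commit()
    print('seed done')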
Example #2
def app():
    print("\nCalling app fixture...\n")
    app = create_app(testing=True)

    with app.app_context():
        db.drop_all()
        db.create_all()
        db.session.add_all((User(username='******', password='******'),
                            User(username='******', password='******'),
                            Post(title='test title',
                                 body='test\nbody',
                                 author_id=1,
                                 created=datetime.date(2021, 2, 28))))
        db.session.commit()

    yield app
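Example #2 yields the app, so in the original project it is presumably registered as a pytest fixture (for instance in conftest.py). A hedged companion sketch of how a test could consume it through a test client follows; the client fixture, the route, and the assertion are illustrative assumptions, not part of the snippet.

import pytest

@pytest.fixture
def client(app):
    # Flask's built-in test client, bound to the seeded test app from Example #2.
    return app.test_client()

def test_index_shows_seeded_post(client):
    # Assumes the app serves an index page listing posts; purely illustrative.
    response = client.get('/')
    assert response.status_code == 200
    assert b'test title' in response.data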
Example #3
 def run(self):
     db.drop_all()
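Example #3 is a bare run(self) method, which reads like the entry point of a CLI command class, for example a Flask-Script Command. A sketch under that assumption follows; the DropDB class and the manager are not from the original, and app/db are taken to be the project's existing Flask app and SQLAlchemy instance.

from flask_script import Command, Manager

class DropDB(Command):
    """Destructive helper: drop every table managed by SQLAlchemy."""

    def run(self):
        db.drop_all()

manager = Manager(app)  # `app` and `db` are the project's existing objects (assumed)
manager.add_command('dropdb', DropDB())

if __name__ == '__main__':
    manager.run()  # invoked as: python manage.py dropdb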
Example #4
def make_migration():
    with app.app_context():
        db.drop_all()
        tables_name = ['user','post','account']
        for name in tables_name:
            conn = psycopg2.connect(database='flaskdb',user='******',password='******',host='localhost')
            cursor = conn.cursor()
            try:
                cursor.execute(f'select * from public.{name}')
                conn.close()
            except Exception as e:  # the connection must be opened inside the loop, because the first failed query leaves it unusable
                if f'错误:  关系 "public.{name}" 不存在' in str(e):
                    # the Chinese-locale PostgreSQL message for: ERROR: relation "public.{name}" does not exist
                    print(f'Confirmed that table {name} was dropped')
                    conn.close()
                    # print(dir(cursor))
                else:
                    print(e)
        print('Database cleared successfully')
        print('Re-initializing the database')
        db.create_all()
        print('Database re-initialized successfully')
    # locate the most recent Excel migration file
    file_dir = os.getcwd() + '/migrations/'
    ls = os.listdir(file_dir)
    next_number = 0  # starting file number
    if ls:
        combine_tuple = tuple(zip(range(len(ls)), ls))
        print(combine_tuple)
        next_number = combine_tuple[-1][0]
    ex_file_name = f'migrations/mg{next_number}_blog.xlsx'
    wb = load_workbook(filename = ex_file_name)
    
    with psycopg2.connect(database='flaskdb',user='******',password='******',host='localhost') as conn:
        cursor = conn.cursor()
        for name in tables_name:
            sheet = wb[f'{name} data']
            rows = sheet.rows
            # print(dir(rows))
            print('-'*12)
            model_name = name.title()  # model class name
            same_fields = None  # used instead of real_fields when the fields are the same but in a different order
            tag_delete = None  # "fields removed" flag, reset for each table
            tag_add = None  # "fields added" flag, reset for each table
            diff_fields = None  # reset once the inner loop finishes
            diff_fields_index = []  # column indexes used to delete values from or insert values into line
            for row in rows:
                cursor.execute(f'select * from public.{name}')
                fields = cursor.description
                # print(fields)
                fields_name = [ field[0] for field in fields]
                line = [col.value for col in row]
                real_fields = fields_name
                orig_fields = line
                # print(real_fields)
                # print(orig_fields)
                if fields_name == orig_fields:  # the fields did not change
                    print(f'The {name} table fields did not change')
                    continue
                elif len(fields_name) < len(orig_fields) and tag_delete is not True:  # fields were removed
                    diff_fields = [field for field in orig_fields if field not in fields_name]
                    print(f'The {name} table fields have changed: {diff_fields} were removed')
                    tag_delete = True
                    # removed fields shift the row data, so the values of those columns must be dropped
                    if diff_fields:
                        for field in diff_fields:
                            index = orig_fields.index(field)
                            diff_fields_index.append(index)
                    # print(diff_fields_index)
                    continue
                elif len(fields_name) > len(orig_fields) and tag_add is not True:  # fields were added
                    diff_fields = [field for field in real_fields if field not in orig_fields]
                    print(f'The {name} table fields have changed: {diff_fields} were added')
                    tag_add = True
                    # added fields shift the row data, so a default None is inserted for each new column;
                    # those columns must be nullable in the database
                    if diff_fields:
                        for field in diff_fields:
                            index = real_fields.index(field)
                            diff_fields_index.append(index)
                    continue
                elif fields_name != orig_fields and orig_fields[0] == 'id': 
                    # print(fields_name == line)
                    diff_fields = [ field for field in orig_fields if field not in fields_name]
                    # print(diff_fields)
                    if not diff_fields and len(fields_name) == len(orig_fields):  # same fields, different order
                        print(f'The {name} table fields are the same as before, but their order differs')
                        same_fields = orig_fields
                        # print(same_fields)
                    if diff_fields:
                        raise AttributeError(f'{diff_fields} fields not found')
                    continue
                if same_fields:
                    import_db(name,same_fields,line)
                if diff_fields_index:
                    # print(diff_fields_index)
                    diff_index = 0
                    for index in diff_fields_index:
                        if tag_delete:
                            _data = line.pop(index - diff_index)
                            print(f'{_data} will not be imported because {diff_fields[diff_index]} was removed')
                        if tag_add:
                            line.insert(index, None)
                            print(f'Field {diff_fields[diff_index]} was added, filling it with default data')
                            
                        diff_index += 1
                    dict_data = dict(zip(real_fields,line))
                    if tag_add:
                        list_data = list(zip(real_fields,line))
                        rm_index = 0
                        for index in diff_fields_index:
                            rm_data = list_data.pop(index-rm_index)
                            rm_index += 1 
                        print('Default data imported for this row')
                        dict_data = dict(list_data)
                    import_db(model_name,dict_data)
                    print('-'*8)
                if (not tag_add and not tag_delete) and not same_fields:
                    dict_data = dict(zip(real_fields,line))
                    import_db(model_name,dict_data)
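make_migration() calls an import_db() helper that is not shown in the snippet (and is called once with three arguments and once with two). Purely as a guess at the two-argument form import_db(model_name, dict_data), it might look roughly like the sketch below; the model lookup table and the commit-per-row behaviour are assumptions.

def import_db(model_name, dict_data):
    # Hypothetical helper: map the model name ('User', 'Post', 'Account') to the
    # ORM class and insert one row built from the column/value dict.
    models = {'User': User, 'Post': Post, 'Account': Account}
    model = models[model_name]
    db.session.add(model(**dict_data))
    db.session.commit()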
Example #5
def drop_db():
    db.drop_all()
Example #6
 def tearDown(self):
     db.session.remove()
     db.drop_all()
     self.app_context.pop()
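The tearDown() in Example #6 pops an application context and wipes the schema, so the matching setUp() presumably pushed that context and created the tables. A hedged reconstruction follows; create_app and db are assumed to come from the project, as in Example #2.

import unittest

class BaseTestCase(unittest.TestCase):
    def setUp(self):
        self.app = create_app(testing=True)  # assumed application factory
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()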
Example #7
def init_db():
    db.drop_all()
    db.create_all()
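With Flask-SQLAlchemy, drop_all() and create_all() need an active application context (or an app already bound to db), so a helper like init_db() is typically invoked along these lines; the app object and the __main__ guard are assumptions for illustration.

if __name__ == '__main__':
    # Push an application context so Flask-SQLAlchemy knows which database to use.
    with app.app_context():
        init_db()
        print('database initialized')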