def setup(app, loop):
    """Create the async Postgres connection and its peewee-async manager.

    :param app: application object (available for the caller to attach to)
    :param loop: asyncio event loop the manager should run on
    :return: the created ``Manager`` instance

    Bug fix: the original assigned both objects to locals and returned
    nothing, so the caller could never reach the manager.
    """
    database = PostgresqlDatabase(database='test',
                                  host='127.0.0.1',
                                  user='******',
                                  password='******')
    objects = Manager(database, loop=loop)
    return objects
def main():
    """Parse CLI options, configure logging, load config tables, and run
    the API HTTP server until interrupted."""
    parse_command_line()
    logging.getLogger().setLevel(LOGGING_LEVEL)
    application = Application()
    # Attach the peewee-async manager and forbid synchronous queries so an
    # accidental blocking call raises instead of stalling the event loop.
    objects = Manager(database)
    database.set_allow_sync(False)
    application.objects = objects
    http_server = tornado.httpserver.HTTPServer(application, xheaders=True)
    # The first CLI argument, when present, overrides the configured port.
    api_port = API_PORT if len(sys.argv) == 1 else int(sys.argv[1])
    http_server.bind(api_port)
    http_server.start()
    # Load default configuration tables before serving requests.
    import_default_config()
    import_sign_config()
    import_sign_award_config()
    import_tool_config()
    import_loto_config()
    import_robot_config()
    import_challenge_config()
    print("API Server start on port %s" % api_port)
    try:
        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        # Bug fix: corrected typo in the shutdown log message ("stoped").
        logging.info("API Server stopped")
        sys.exit(0)
async def main():
    """Smoke test: read MySQL credentials stored in the ``UserDB`` table,
    then connect with them via aiomysql and query the same table.

    Fixes: the connection is now closed in a ``finally`` block so it is
    released even when the query fails; removed the unused
    ``MySQLDatabase`` import.
    """
    from webDB.settings import database
    from peewee_async import Manager
    from apps.manager.models import UserDB
    import aiomysql

    obj = Manager(database)
    record = await obj.get(UserDB, id=2, owner_id=1)
    print(record.host)
    print(record.password)
    config = {
        'host': record.host,
        'port': record.port,
        'user': record.user,
        'password': '******',
        'db': record.database,
    }
    conn = await aiomysql.connect(**config)
    try:
        async with conn.cursor() as cur:
            await cur.execute(
                'select host, password from userdb where id=1 and owner_id=1')
            r = await cur.fetchall()
            print(r)
    finally:
        # Always release the connection, even if the query raised.
        conn.close()
def make_manager():
    """Return the async manager; the table itself was created synchronously
    beforehand.  From here on any synchronous call raises AssertionError."""
    mgr = Manager(db, loop=loop)
    # Disable all future synchronous access.
    mgr.database.allow_sync = False
    return mgr
def __new__(cls, *args, **kwargs):
    """Lazy singleton: the first call builds the pooled MySQL connection
    (settings from ``from_config``) and its async manager; later calls
    reuse the cached instance."""
    if not hasattr(cls, "_instance"):
        cfg = cls.from_config()
        cls.conn = PooledMySQLDatabase(charset='utf8mb4', **cfg)
        cls.manager = Manager(cls.conn)
        cls._instance = super(MysqlPool, cls).__new__(cls)
    return cls._instance
def objects(cls):
    """Return the shared peewee-async Manager, creating it on first use.

    The manager is cached on the class, so every call returns the same
    instance.
    """
    # The original spelled this as `try: if cls.manager: pass` with an
    # AttributeError handler; hasattr() catches exactly AttributeError
    # and reads as intended.
    if not hasattr(cls, 'manager'):
        cls.manager = Manager(cls._meta.database)
    return cls.manager
async def test_objects():
    """``BaseModel.objects()`` returns a peewee-async Manager and caches it."""
    objects = BaseModel.objects()
    # isinstance is the idiomatic type check; the original compared
    # str(type(...)) strings, which also needlessly built a throwaway
    # Manager just for the comparison.
    assert isinstance(objects, Manager)
    # Repeated calls must yield the very same manager instance.
    objects2 = BaseModel.objects()
    assert objects is objects2
def fixt_saved_user(fixt_username, fixt_objects: Manager):
    """Fixture: fetch (or synchronously create) the DbUser for this test."""
    defaults = {
        'username': fixt_username,
        'is_admin': False,
    }
    with fixt_objects.allow_sync():
        user, was_created = DbUser.get_or_create(username=fixt_username,
                                                 defaults=defaults)
    return user
def __init__(self):
    """Initialise wtforms JSON support, the async ORM manager, and the
    Tornado application settings."""
    wtforms_json.init()
    manager = Manager(database_async)
    # Synchronous queries are forbidden from here on.
    database_async.set_allow_sync(False)
    web.Application.objects = manager
    super(Application, self).__init__(route.urls,
                                      debug=settings.DEBUG,
                                      **settings.TORNADO_CONF)
class BaseModel(Model):
    """Base model class which specifies orm manager and Postgresql database."""

    # Shared peewee-async manager; every subclass reuses this instance.
    manager = Manager(DATABASE)

    class Meta(object):
        """Model metadata."""

        database = DATABASE
async def make_dev(mgr: Manager, users):
    """Promote each given user id to the DEVELOPER role (creating missing
    users) inside one transaction; return the ids actually promoted."""
    promoted = []
    async with mgr.atomic():
        for uid in users:
            user, _created = await mgr.get_or_create(User, id=uid)
            if user.role < UserRoles.DEVELOPER.value:
                user.role = UserRoles.DEVELOPER.value
                await mgr.update(user)
                promoted.append(uid)
    return promoted
def make_app():
    """Build the web application with an async-only ORM manager attached."""
    app = web.Application(urlpattern, **settings)
    # Attach the peewee-async manager here.
    manager = Manager(database)
    # Synchronous ORM access is disallowed from this point.
    database.set_allow_sync(False)
    app.objects = manager
    return app
def setUp(self):
    """Wire a pooled Postgres connection (from environment variables)
    into the application under test."""
    pg_conf = {
        'database': environ.get('PGDB', 'postgres'),
        'user': environ.get('PGUSER', 'admin'),
        'password': environ.get('PGPASS', 'admin'),
    }
    self.db = PooledPostgresqlDatabase(**pg_conf)
    self.db_manager = Manager(database=self.db)
    self.ar_strategy = PeweeActiveRecordStrategy(
        manager=self.db_manager,
        active_record_class=EventRecord,
    )
    self.app = ToDoApplication(entity_active_record_strategy=self.ar_strategy, )
def test(self): db = self.get_mysql_db() # Re proxy to avoid previous test use SyncManager._meta.database = Proxy() # Init/Create in sync mode SyncManager.init_db(db) SyncManager.create_table() # Clear out from previous test run SyncManager.delete().execute() sync_manager = get_sync_manager(app="test-async", start=0, db=db, set_async=True) async def it(since=None, limit=None, offset=None): log.debug("Getting iterator since={} limit={} offset={}".format( since, limit, offset)) def dummy(): for x in range(since + 1, since + limit + 1): log.debug("yielded {}".format(x)) yield {"x": x} return LastOffsetQueryIterator(dummy(), row_output_fun=lambda x: x, key_fun=lambda x: x['x'], is_unique_key=True) output = [] async def process(it): nonlocal output for item in it: output.append(item) log.debug("process item: {}".format(item)) processor = AsyncProcessor(sync_manager=sync_manager, it_function=it, process_function=process, object=Manager(db, loop=None)) async def consume(): await processor.process(limit=10, i=3) asyncio.get_event_loop().run_until_complete(consume()) self.assertEqual(len(output), 30)
def __new__(cls, *args, **kwargs):
    """Lazy singleton: the first call creates the pooled MySQL connection
    and its async manager; later calls reuse the cached instance."""
    if not hasattr(cls, "_instance"):
        logger.debug("init mysql pool")
        # NOTE(review): credentials are hard-coded here; the sibling pool
        # classes load them from configuration — consider doing the same.
        cls.conn = PooledMySQLDatabase('blog',
                                       host='localhost',
                                       password='',
                                       port=3306,
                                       user='******',
                                       max_connections=10,
                                       charset='utf8mb4')
        cls.manager = Manager(cls.conn)
        # Fix: do not forward constructor args to object.__new__ — it
        # takes no extra arguments (consistent with the other pool class).
        cls._instance = super(MysqlPool, cls).__new__(cls)
    return cls._instance
def __init__(self):
    """Set up the pooled MySQL database, an async-only manager, and
    per-user asyncio locks."""
    # Hoist the repeated Config()['sql'] lookups: the original re-read
    # the configuration object six times for the same section.
    sql_cfg = Config()['sql']
    self.database = PooledMySQLDatabase(
        sql_cfg['db'],
        user=sql_cfg['user'],
        password=sql_cfg['password'],
        host=sql_cfg['host'],
        port=int(sql_cfg['port']),
        max_connections=10)
    self.objects = Manager(self.database, loop=asyncio.get_event_loop())
    # Synchronous queries are disallowed from here on.
    self.objects.database.allow_sync = False
    # One lock per user key; created on first access.
    self.user_locks = defaultdict(asyncio.Lock)
def main() -> None:
    """Bootstrap Tornado on the asyncio event loop and serve forever."""
    # Tornado must be told to use the asyncio loop explicitly (the
    # original note suggests this may only be required on Windows).
    AsyncIOMainLoop().install()
    loop = asyncio.get_event_loop()
    app = App()
    db_manager = Manager(database=DATA_BASE, loop=loop)
    # Raise on any synchronous database access.
    db_manager.database.allow_sync = False
    app.setup_db_manager(db_manager)
    # TODO: binding like this is not suitable for production.
    app.listen(options.port)
    loop.call_soon(lambda: print(f'server had started at {options.port}'))
    loop.run_forever()
def __new__(cls, *args, **kwargs):
    """Lazy singleton: the first call builds the pooled MySQL connection
    from module settings plus its async manager; later calls reuse it."""
    if not hasattr(cls, "_instance"):
        logger.debug("init mysql pool")
        cls.conn = PooledMySQLDatabase(
            database=_settings['database'],
            host=_settings['host'],
            password=_settings['password'],
            port=_settings['port'],
            user=_settings['user'],
            max_connections=_settings['max_connections'],
            charset=_settings['charset'])
        cls.manager = Manager(cls.conn)
        # Fix: do not forward constructor args to object.__new__ — it
        # takes no extra arguments (consistent with the sibling pool).
        cls._instance = super(MysqlPool, cls).__new__(cls)
    return cls._instance
def serve():
    """Start the Tornado application with an async-only ORM manager."""
    wtforms_json.init()
    app = make_app(False)
    app.listen(options.port)
    # Once the async manager exists, synchronous access is disabled.
    manager = Manager(database)
    database.set_allow_sync(False)
    app.objects = manager
    sys.stdout.write(
        f"Start server at:http://0.0.0.0:{options.port} \n"
        f"Settings: {options.settings}\n")
    tornado.ioloop.IOLoop.current().start()
def make_app(loop):
    """Assemble the application: redis pool, aiohttp session, async ORM."""
    application = Application(urlspatten, **settings, debug=True)
    pool = RedisPool(loop=loop)
    # Async redis connection.
    application.redis = pool.get_conn()
    # Shared async HTTP client session.
    application.session = pool.get_aiohttp()
    # Async peewee manager; synchronous queries are disabled.
    application.objects = Manager(database)
    database.set_allow_sync(False)
    return application
async def _task(db: Manager) -> int:
    """Insert the (enclosing-scope) ``user`` row and register its default
    group memberships inside one transaction.

    :param db: peewee-async manager to run the queries on
    :return: primary key of the inserted user row
    :raises IntegrityError: when joining the inviter's group violates a
        constraint (e.g. duplicate membership)
    """
    async with db.atomic():
        # Insert one row into the user table; execute() returns the pk.
        pk = await db.execute(user.insert(user.__data__))
        # 1. Join the official community (group_id == 1).
        group = await db.get(Group, group_id=1)
        await db.create(UserGroup, user=user, group=group)
        # 2. Join the first-level inviter's community, if any.
        if user.inviter1:
            try:
                group2 = await db.get(Group, owner=user.inviter1)
                await db.create(UserGroup, user=user, group=group2)
            except DoesNotExist:
                # Inviter owns no group — nothing to join.
                pass
            except IntegrityError:
                # Bug fix: re-raise the caught exception. The original
                # `raise IntegrityError` raised the bare class, losing the
                # message and the traceback.
                raise
    return pk
def __new__(cls, *args, **kwargs):
    """Lazy singleton: resolve the project root, load the MySQL config,
    and build the pooled connection plus its async manager once."""
    if not hasattr(cls, "_instance"):
        # Make the project root importable so `config` can be found.
        PROJECT_PATH = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.path.pardir)
        if PROJECT_PATH not in sys.path:
            sys.path.insert(0, PROJECT_PATH)
        from config import mysql
        cls.conn = PooledMySQLDatabase(
            mysql['name'],
            host=mysql['host'],
            password=mysql['password'],
            port=mysql['port'],
            user=mysql['user'],
            min_connections=mysql['min_connections'],
            max_connections=mysql['max_connections'],
            charset=mysql['charset'])
        cls.manager = Manager(cls.conn)
        # Synchronous fallback is deliberately left enabled here.
        cls.conn.set_allow_sync(True)
        # Fix: object.__new__ takes no extra arguments; removed the dead
        # commented-out set_allow_sync(False) line as well.
        cls._instance = super(MysqlPool, cls).__new__(cls)
    return cls._instance
import asyncio import peewee import logging import random from collections import AsyncIterable from peewee_async import Manager, PostgresqlDatabase import asyncio loop = asyncio.get_event_loop() database = PostgresqlDatabase('tags', user='******', password='******', host='postgres') objects = Manager(database, loop=loop) class Tag(peewee.Model): id = peewee.IntegerField(primary_key=True, unique=True, index=True) account = peewee.CharField() campaign_id = peewee.IntegerField(index=True) tag = peewee.CharField() class Meta: database = database objects.database.allow_sync = False async def fill_table():
class BaseModel(Model):
    """Common base model: synchronous access runs through ``sync_db``
    (``Meta.database``), asynchronous access through the class-level
    peewee-async ``objects`` manager.

    Provides paired ``async_*`` / ``sync_*`` CRUD helpers plus a small
    expression DSL (``Func``) used by ``_get_query`` to build WHERE
    clauses from keyword arguments.
    """

    # async
    objects = Manager(async_db)

    # Soft-delete flag values for ``is_delete``.
    DELETE_NO = 0
    DELETE_IS = 1
    DELETE_CHOICES = (
        (0, '未删除'),
        (1, '已删除'),
    )

    class Meta:
        # Synchronous connection used by plain peewee operations.
        database = sync_db

    create_time = DateTimeField(default=dt.now, verbose_name='创建时间')
    update_time = DateTimeField(default=dt.now, verbose_name='更新时间')
    is_delete = IntegerField(default=DELETE_NO, choices=DELETE_CHOICES,
                             verbose_name='是否删除')

    @classmethod
    def get_sql(cls, *fields, order_by=None, paginate=None, **where):
        """Build (but do not run) a SELECT query from keyword conditions."""
        return cls._get_query(*fields, order_by=order_by, paginate=paginate,
                              **where)

    # Async
    @classmethod
    async def async_sql(cls, sql, *params) -> AsyncQueryWrapper:
        """Asynchronously execute a raw SQL string."""
        return await cls.objects.execute(cls.raw(sql, *params))

    @classmethod
    async def async_execute(cls, query):
        """Asynchronously execute a prepared peewee query."""
        return await cls.objects.execute(query)

    @classmethod
    async def async_execute_count(cls, query):
        """Asynchronously return the row count of ``query``."""
        return await cls.objects.count(query)

    @classmethod
    async def async_get(cls, *fields, order_by=None, paginate=None,
                        **where) -> "Model | None":
        """Return the first matching row, or None when nothing matches."""
        query = cls._get_query(*fields, order_by=order_by, paginate=paginate,
                               **where)
        try:
            result = await cls.objects.execute(query)
            return list(result)[0]
        except IndexError:
            # Empty result set.
            return None

    @classmethod
    async def async_select(cls, *fields, order_by=None, paginate=None,
                           **where) -> AsyncQueryWrapper:
        """Simple async select.

        :param fields: fields to fetch
        :param order_by: ordering expression(s)
        :param paginate: pagination tuple, e.g. ``(0, 10)``
        :param where: keyword filter conditions
        """
        query = cls._get_query(*fields, order_by=order_by, paginate=paginate,
                               **where)
        return await cls.objects.execute(query)

    @classmethod
    async def async_count(cls, *fields, clear_limit=False, **where) -> int:
        """Asynchronously count the matching rows."""
        query = cls._get_query(*fields, **where)
        return await cls.objects.count(query, clear_limit)

    @classmethod
    async def async_create(cls, **kwargs) -> Model:
        """Create and persist a new row; validates field names first."""
        for field, value in kwargs.items():
            if not hasattr(cls, field):
                raise AttributeError("%s object has no attribute %s"
                                     % (cls.__name__, field))
        return await cls.objects.create(cls, **kwargs)

    async def async_update(self, _only=True, **kwargs) -> Model:
        """Asynchronously update this row.

        With ``_only`` true, only the fields given in ``kwargs`` (plus
        ``update_time``) are written; otherwise every field is saved.
        """
        self.update_time = dt.now()
        update_fields = ['update_time']
        for field, value in kwargs.items():
            if hasattr(self, field):
                if getattr(self, field) != value:
                    setattr(self, field, value)
            else:
                raise AttributeError("%s object has no attribute %s"
                                     % (self.__class__.__name__, field))
            update_fields.append(field)
        if _only:
            _only = update_fields
        else:
            _only = None
        await self.objects.update(self, only=_only)
        return self

    async def async_delete(self) -> Model:
        """Soft delete, `DELETE_NO` -> `DELETE_IS` """
        await self.async_update(is_delete=self.DELETE_IS)
        return self

    # Sync
    @classmethod
    def sync_sql(cls, sql, *params):
        """Execute raw SQL on the synchronous connection."""
        return sync_db.execute_sql(sql, params)

    @classmethod
    def sync_get(cls, *fields, order_by=None, paginate=None,
                 **where) -> "Model | None":
        """Synchronously return the first matching row, or None."""
        query = cls._get_query(*fields, order_by=order_by, paginate=paginate,
                               **where)
        try:
            result = query.limit(1)
            return list(result)[0]
        except Exception:
            # NOTE(review): broad catch — any query error is reported as
            # "no row"; narrowing to IndexError may be safer. Confirm.
            return None

    @classmethod
    def sync_select(cls, *fields, order_by=None, paginate=None, **where):
        """Simple sync select (returns the query, not its results).

        :param fields: fields to fetch
        :param order_by: ordering expression(s)
        :param paginate: pagination tuple, e.g. ``(0, 10)``
        :param where: keyword filter conditions
        """
        query = cls._get_query(*fields, order_by=order_by, paginate=paginate,
                               **where)
        return query

    @classmethod
    def sync_count(cls, *fields, clear_limit=False, **where) -> int:
        """Synchronously count the matching rows."""
        query = cls._get_query(*fields, **where)
        return query.count(clear_limit)

    @classmethod
    def sync_create(cls, **kwargs) -> Model:
        """Synchronously insert a new row."""
        return cls(**kwargs).save(force_insert=True)

    def sync_update(self, _only=True, **kwargs) -> Model:
        """Synchronously update this row (mirror of ``async_update``)."""
        self.update_time = dt.now()
        update_fields = ['update_time']
        for field, value in kwargs.items():
            if hasattr(self, field):
                if getattr(self, field) != value:
                    setattr(self, field, value)
            else:
                raise AttributeError("%s object has no attribute %s"
                                     % (self.__class__.__name__, field))
            update_fields.append(field)
        if _only:
            _only = update_fields
        else:
            _only = None
        self.save(only=_only)
        return self

    def sync_delete(self) -> Model:
        """Soft delete, `DELETE_NO` -> `DELETE_IS` """
        self.sync_update(is_delete=self.DELETE_IS)
        return self

    def dt_to_str(self, datetime, format="%Y-%m-%d %H:%M:%S"):
        """Format a datetime via the project's ``dt`` helper."""
        return dt.dt_to_str(datetime, format)

    @classmethod
    def get_or_expression(cls, kwargs):
        """OR together the expressions built from a {field: value} mapping."""
        return reduce(operator.or_,
                      (cls.get_expression(field, value)
                       for field, value in kwargs.items()))

    @classmethod
    def get_and_expression(cls, kwargs):
        """AND together the expressions built from a {field: value} mapping."""
        return reduce(operator.and_,
                      (cls.get_expression(field, value)
                       for field, value in kwargs.items()))

    @classmethod
    def get_expression(cls, field, value):
        """Translate ``(field, value)`` into a peewee expression.

        ``value`` may be a plain value (equality) or a ``Func`` wrapper
        whose ``opt`` selects the operator: ``%`` (ILIKE via ``**``),
        ``in`` / ``ni`` (NOT IN), the comparison operators, ``is`` /
        ``si`` (IS NULL / IS NOT NULL), and ``|`` / ``&`` for nested
        OR / AND groups.
        """
        field = getattr(cls, field, None)
        if isinstance(value, Func):
            if value.opt == '%':
                return field ** value.val
            elif value.opt == 'in':
                return field.in_(value.val)
            elif value.opt == 'ni':
                return field.not_in(value.val)
            elif value.opt == '>':
                return field > value.val
            elif value.opt == '<':
                return field < value.val
            elif value.opt == '>=':
                return field >= value.val
            elif value.opt == '<=':
                return field <= value.val
            elif value.opt == 'is':
                return field.is_null()
            elif value.opt == 'si':
                return field.is_null(False)
            elif value.opt == '=':
                return field == value.val
            elif value.opt == '!=':
                return field != value.val
            elif value.opt == '|':
                return cls.get_or_expression(value.val)
            elif value.opt == '&':
                return cls.get_and_expression(value.val)
            else:
                raise AttributeError("Func has no operator %s" % value.opt)
        else:
            return field == value

    @classmethod
    def _get_query(cls, *fields, order_by=None, paginate=None,
                   **where) -> Query:
        """Build a SELECT with optional WHERE / ORDER BY / pagination."""
        query = cls.select(*fields)
        expressions = []
        if where:
            for field, value in where.items():
                if hasattr(cls, field):
                    if isinstance(value, (list, tuple)):
                        # Several conditions on the same field.
                        for val in value:
                            expressions.append(cls.get_expression(field, val))
                    else:
                        expressions.append(cls.get_expression(field, value))
                else:
                    # Unknown field name: only the nesting operators are
                    # allowed, since they carry their own field names.
                    if isinstance(value, Func) and (value.opt == '&'
                                                    or value.opt == '|'):
                        expressions.append(cls.get_expression(field, value))
                    else:
                        raise AttributeError("%s Model has no field %s"
                                             % (cls.__name__, field))
            query = query.where(*expressions)
        if order_by:
            if isinstance(order_by, (list, tuple)):
                query = query.order_by(*order_by)
            else:
                query = query.order_by(order_by)
        if paginate:
            page, paginate_by = paginate
            query = query.paginate(page, paginate_by)
        return query

    async def normal_info(self):
        """Common serialisable fields present on every row."""
        return {
            'create_time': self.dt_to_str(self.create_time),
            'update_time': self.dt_to_str(self.update_time),
            'is_delete': self.is_delete,
        }
async def set_db(_app, _loop):
    """Initialise the database proxy, attach the async manager, and open
    the redis pool."""
    pg = PooledPostgresqlDatabase(**app.config.DATABASE)
    database.initialize(pg)
    BaseModel.pee = Manager(database, loop=_loop)
    app.redis = await aioredis.create_pool(**app.config.REDIS)
def get_async_manager(cls):
    """Build a fresh peewee-async Manager bound to this model's database."""
    from peewee_async import Manager as AsyncManager
    return AsyncManager(cls._meta.database)
async def new_user_async(
        username: str,
        password: str,
        email: str,
        is_remote: bool = False,
        confirmed: bool = False,
        is_private: bool = False,
        is_admin: bool = False,
        public_key: str = None,
        name: str = None,
        description: str = "",
        ap_id: str = None,
        send_confirmation: bool = True,
        public_inbox: Union[str, None] = None) -> Union[bool, UserProfile]:
    """Create a User and its UserProfile.

    Returns the new UserProfile on success, False on any failure
    (missing fields, invalid/duplicate username, database errors).
    """
    if not (password and username and email):
        return False
    objects = Manager(db)
    # Verify the username is valid and not already taken.
    logging.debug(f"Starting to create user {username}")
    username_count = await objects.count(
        User.select().where(User.username == username))
    if not valid_username(username) or (username_count != 0):
        logger.error(f"@{username} is a not valid username")
        return False
    # Hash the password.
    # NOTE(review): bcrypt.hashpw normally expects bytes — assuming the
    # call site / `salt_code` already handle encoding; confirm.
    passw = bcrypt.hashpw(password, salt_code)
    # First we create the actual user row.
    try:
        user = await objects.create(
            User,
            username=username.lower(),
            password=passw,
            email=email,
            confirmed=confirmed,
            is_admin=is_admin,
            is_private=is_private,
        )
    except Exception as e:
        # Best-effort contract: report failure as False, not an exception.
        logging.error(f"User not created: {e}")
        return False
    logging.debug(f"Created user {user.username}")
    if name is None:  # fixed: identity comparison instead of `== None`
        name = username
    # Now we create the profile.
    try:
        data = {
            "id": user.id,
            "disabled": True,
            "is_remote": is_remote,
            "user": user,
            "name": name,
            "public_key": public_key,
            "description": description,
            'public_inbox': public_inbox,
        }
        if is_remote:
            data['ap_id'] = ap_id
        else:
            data['ap_id'] = uri("user", {"username": username})
        profile = await objects.create(UserProfile, **data)
        # Send the confirmation email.
        if not user.confirmed and send_confirmation:
            send_activation_email(profile)
        logging.info(f"New Profile created: {profile}")
        return profile
    except Exception as e:
        # Roll back the half-created user so the username is not burned.
        logging.error(e)
        await objects.delete(user)
        return False
from tornado import web
import tornado.ioloop
from peewee_async import Manager
from ZxForm.urls import urlpattern
from ZxForm.settings import settings, database

if __name__ == "__main__":
    # Integrate JSON support into wtforms.
    import wtforms_json
    wtforms_json.init()
    app = web.Application(urlpattern, debug=True, **settings)
    app.listen(8888)
    # Async-only ORM manager; no need for sync anymore.
    manager = Manager(database)
    database.set_allow_sync(False)
    app.objects = manager
    tornado.ioloop.IOLoop.current().start()
listen_port = '80'
listen_ip = '0.0.0.0'  # IP to bind to for the server, 0.0.0.0 means all local IPv4 addresses
ssl_cert_path = './app/ssl/apfell-cert.pem'
ssl_key_path = './app/ssl/apfell-ssl.key'
whitelisted_ip_blocks = ['0.0.0.0/0']  # only allow connections from these IPs to the /login and /register pages
use_ssl = False
# --------------------------------------------
# --------------------------------------------
# --------------------------------------------
# custom loop to pass to db manager
dbloop = uvloop.new_event_loop()
apfell_db = PooledPostgresqlDatabase(db_name, user=db_user, password=db_pass)
apfell_db.connect_async(loop=dbloop)
db_objects = Manager(apfell_db, loop=dbloop)
apfell = Sanic(__name__, strict_slashes=False)
apfell.config[
    'WTF_CSRF_SECRET_KEY'] = 'really secure super secret key here, and change me!'
apfell.config['SERVER_IP_ADDRESS'] = server_ip
apfell.config['SERVER_PORT'] = listen_port
apfell.config['DB_USER'] = db_user
apfell.config['DB_PASS'] = db_pass
apfell.config['DB_NAME'] = db_name
# Bug fix: this concatenation had been mangled (credential masking ate the
# `+ apfell.config[` pieces, leaving invalid syntax); rebuilt it from the
# config keys assigned just above.
apfell.config['DB_POOL_CONNECT_STRING'] = (
    'dbname=' + apfell.config['DB_NAME']
    + ' user=' + apfell.config['DB_USER']
    + ' password=' + apfell.config['DB_PASS'])
apfell.config['API_VERSION'] = "1.0"
apfell.config['API_BASE'] = "/api/v" + apfell.config['API_VERSION']
apfell.config['REQUEST_MAX_SIZE'] = 100000000
def fixt_objects(fixt_db, event_loop) -> Manager:
    """Fixture: a peewee-async manager bound to the test database and loop."""
    return Manager(database=fixt_db, loop=event_loop)