def create_app(Config, enable_config_file=False):
    """
    Application factory.

    :param Config: configuration class to load
    :param enable_config_file: whether a config file named in an environment
        variable may override the already-loaded configuration
    :return: the application object
    """
    app = create_flask_app(Config, enable_config_file)

    # ---- configuration the running project needs ----

    # Logging
    from utils.logging import create_logger
    create_logger(app)

    # Redis sentinel
    from redis.sentinel import Sentinel
    _sentinel = Sentinel(app.config.get('REDIS_SENTINELS'))
    # Master/slave connections resolved through the sentinel
    app.redis_master = _sentinel.master_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])
    app.redis_slave = _sentinel.slave_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])

    # Redis cluster
    from rediscluster import StrictRedisCluster
    app.redis_cluster = StrictRedisCluster(
        startup_nodes=app.config['REDIS_CLUSTER'])

    # MySQL database connection initialisation
    from models import db
    db.init_app(app)

    # Snowflake distributed-ID worker
    from utils.snowflake.id_worker import IdWorker
    app.id_worker = IdWorker(app.config['DATACENTER_ID'],
                             app.config['WORKER_ID'],
                             app.config['SEQUENCE'])

    # ---- request hooks ----
    from utils.middlewares import jwt_authentication
    app.before_request(jwt_authentication)

    # ---- blueprints ----
    from toutiao.resources.user import user_bp
    app.register_blueprint(user_bp)

    # ---- scheduled job: reconcile redis/mysql statistics (daily 03:00) ----
    # Imported locally, consistent with the factory-local import style above.
    from apscheduler.schedulers.background import BackgroundScheduler
    from apscheduler.executors.pool import ThreadPoolExecutor

    # BUGFIX: the option key is `executors` (plural); the previous
    # `executor=` spelling is not a recognised APScheduler option, so the
    # configured thread pool never took effect. ThreadPoolExecutor() defaults
    # to 10 worker threads.
    bg_scheduler = BackgroundScheduler(
        executors={'default': ThreadPoolExecutor()})
    # add_job usage: bg_scheduler.add_job(func, trigger, **trigger_args)
    # Production schedule (daily at 03:00) would be:
    # bg_scheduler.add_job(fix_process, 'cron', hour=3)
    from toutiao.aps_scheduler.statistic_data import fix_process
    # 'date' trigger with no run_date fires once, immediately (kept for testing)
    bg_scheduler.add_job(fix_process, 'date', args=[app])
    # Start the scheduler (non-blocking)
    bg_scheduler.start()

    return app
def create_app(config, enable_config_file=False):
    """
    Create the flask application and initialise its components.

    :param config: configuration class
    :param enable_config_file: whether a config file named in the runtime
        environment may override the already-loaded configuration
    :return: flask application
    """
    app = create_flask_app(config, enable_config_file)

    # Register the custom regex URL converters
    from utils.converters import register_converters
    register_converters(app)

    # TODO create the redis sentinel
    from redis.sentinel import Sentinel
    _sentinel = Sentinel(app.config['REDIS_SENTINELS'])
    # Master/slave redis connection objects obtained through the sentinel
    app.redis_master = _sentinel.master_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])
    app.redis_slave = _sentinel.slave_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])

    # Logging
    from utils.logging import create_logger
    create_logger(app)

    # Rate limiter
    from utils.limiter import limiter as lmt
    lmt.init_app(app)

    # User-module blueprint
    from .resources.user import user_bp
    app.register_blueprint(user_bp)

    # News-module blueprint
    from .resources.news import news_bp
    app.register_blueprint(news_bp)

    # Search-module blueprint
    from .resources.search import search_bp
    app.register_blueprint(search_bp)

    return app
def create_app(config, enable_config_file=False):
    """
    Create the application.

    :param config: configuration object
    :param enable_config_file: whether a config file named in the runtime
        environment may override the already-loaded configuration
    :return: the application
    """
    app = create_flask_app(config, enable_config_file)

    # Register URL converters.
    # NOTE(review): mutating sys.path at factory time — presumably so the
    # sibling `utils` package resolves; confirm this is still needed.
    sys.path.append('../')
    from utils.converters import register_converters
    register_converters(app)

    # Redis (flask-redis client); the URL comes from `global_config`,
    # defined elsewhere in this module (presumably an ini-style config —
    # verify against the module top).
    from flask_redis import FlaskRedis
    app.config['REDIS_URL'] = global_config.getRaw('redis', 'REDIS_URL')
    app.redis_cli = FlaskRedis(app)

    # Rate limiter
    from utils.limiter import limiter as lmt
    lmt.init_app(app)

    # Request hook: JWT authentication
    from utils.middlewares import jwt_authentication
    app.before_request(jwt_authentication)

    # Logging
    from utils.logging import create_logger
    create_logger(app)

    # User-module blueprint
    from haozhuan.resources.user import user_bp
    app.register_blueprint(user_bp)

    # Allow cross-origin requests (not wired up here)
    return app
# NOTE(review): `os`, `configparser` and `session` are referenced below but
# not imported in this chunk — presumably imported earlier in the file; confirm.
from huobi.connection.impl.websocket_manage import websocket_connection_handler
from huobi.constant.system import RestApiDefine, WebSocketDefine
from huobi.utils import PrintBasic

from utils.logging import create_logger
from utils.parallel import kill_thread

# Project-relative paths (ROOT is the grandparent of this file's directory).
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONFIG_PATH = os.path.join(ROOT, 'config', 'config.ini')
USER_CONFIG_PATH = os.path.join(ROOT, 'config', 'user.ini')
LOG_PATH = os.path.join(ROOT, 'log', 'trade.log')

# Huobi AWS endpoints.
URL = 'https://api-aws.huobi.pro'
WS_URL = 'wss://api-aws.huobi.pro'

logger = create_logger('goodmorning', LOG_PATH)

# Main config is read unconditionally; the user config is optional.
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
user_config = configparser.ConfigParser()
if os.path.exists(USER_CONFIG_PATH):
    user_config.read(USER_CONFIG_PATH)

# Monkey-patch session.request so every request gets a default 1 s timeout.
session._request = session.request


def _request_with_default_timeout(*args, **kwargs):
    """Delegate to the original request, defaulting `timeout` to 1 second."""
    # BUGFIX: the previous lambda called
    # `session._request(timeout=1, *args, **kwargs)`, which raised
    # "got multiple values for keyword argument 'timeout'" whenever a caller
    # passed an explicit timeout. Defaulting lets callers override it.
    kwargs.setdefault('timeout', 1)
    return session._request(*args, **kwargs)


session.request = _request_with_default_timeout

# Point the huobi SDK at the AWS endpoints and silence its debug printing.
WebSocketDefine.Uri = WS_URL
RestApiDefine.Url = URL
PrintBasic.print_basic = lambda data, name=None: None
def create_app(config, enable_config_file=False):
    """
    Create the application.

    :param config: configuration object
    :param enable_config_file: whether a config file named in the runtime
        environment may override the already-loaded configuration
    :return: the application
    """
    app = create_flask_app(config, enable_config_file)

    # Snowflake distributed-ID worker
    from utils.snowflake.id_worker import IdWorker
    app.id_worker = IdWorker(app.config['DATACENTER_ID'],
                             app.config['WORKER_ID'],
                             app.config['SEQUENCE'])

    # Rate limiter
    from utils.limiter import limiter as lmt
    lmt.init_app(app)

    # Logging
    from utils.logging import create_logger
    create_logger(app)

    # Custom URL converters
    from utils.converters import register_converters
    register_converters(app)

    # Redis sentinel: master connection for writes, slave for reads
    from redis.sentinel import Sentinel
    _sentinel = Sentinel(app.config['REDIS_SENTINELS'])
    app.redis_master = _sentinel.master_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])
    app.redis_slave = _sentinel.slave_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])

    # Redis cluster
    from rediscluster import StrictRedisCluster
    app.redis_cluster = StrictRedisCluster(
        startup_nodes=app.config['REDIS_CLUSTER'])

    # rpc (disabled)
    # app.rpc_reco = grpc.insecure_channel(app.config['RPC'].RECOMMEND)

    # Elasticsearch
    app.es = Elasticsearch(
        app.config['ES'],
        # sniff before doing anything
        sniff_on_start=True,
        # refresh nodes after a node fails to respond
        sniff_on_connection_fail=True,
        # and also every 60 seconds
        sniffer_timeout=60
    )

    # socket.io (disabled)
    # app.sio_maneger = socketio.KombuManager(app.config['RABBITMQ'], write_only=True)

    # MySQL database connection initialisation
    from models import db
    db.init_app(app)

    # Request hook: JWT authentication
    from utils.middlewares import jwt_authentication
    app.before_request(jwt_authentication)

    # User-module blueprint
    from .resources.user import user_bp
    app.register_blueprint(user_bp)

    # News-module blueprint
    from .resources.news import news_bp
    app.register_blueprint(news_bp)

    # Notice-module blueprint
    from .resources.notice import notice_bp
    app.register_blueprint(notice_bp)

    # Search-module blueprint
    from .resources.search import search_bp
    app.register_blueprint(search_bp)

    return app
def create_app(config, enable_config_file=False):
    """
    Create the application.

    :param config: configuration object
    :param enable_config_file: whether a config file named in the runtime
        environment may override the already-loaded configuration
    :return: the application
    """
    app = create_flask_app(config, enable_config_file)

    # Snowflake distributed-ID worker
    from utils.snowflake.id_worker import IdWorker
    app.id_worker = IdWorker(app.config['DATACENTER_ID'],
                             app.config['WORKER_ID'],
                             app.config['SEQUENCE'])

    # Rate limiter
    from utils.limiter import limiter as lmt
    lmt.init_app(app)

    # Cross-origin resource sharing
    CORS(app)

    # Logging
    from utils.logging import create_logger
    create_logger(app)

    # Custom URL converters
    from utils.converters import register_converters
    register_converters(app)

    # redis
    # The legacy redis interface is kept for now
    from utils.redis_client import create_redis_clients
    app.redis_cli = create_redis_clients(app)

    # Sentinel-managed master/slave connections
    from redis.sentinel import Sentinel
    _sentinel = Sentinel(app.config['REDIS_SENTINELS'])
    app.redis_master = _sentinel.master_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])
    app.redis_slave = _sentinel.slave_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])

    # Redis cluster
    from rediscluster import StrictRedisCluster
    app.redis_cluster = StrictRedisCluster(
        startup_nodes=app.config['REDIS_CLUSTER'])

    # Elasticsearch
    app.es = Elasticsearch(
        app.config['ES'],
        # sniff before doing anything
        sniff_on_start=True,
        # refresh nodes after a node fails to respond
        sniff_on_connection_fail=True,
        # and also every 60 seconds
        sniffer_timeout=60)

    # MySQL database connection initialisation
    from models import db
    db.init_app(app)

    # Deprecated: error handlers (kept for reference)
    # from utils.error_handlers import handle_redis_error, handler_mysql_error
    # app.register_error_handler(RedisError, handle_redis_error)
    # app.register_error_handler(SQLAlchemyError, handler_mysql_error)

    # Request hook: JWT authentication
    from utils.middlewares import jwt_authentication
    app.before_request(jwt_authentication)

    # User-module blueprint
    from .resources.user import user_bp
    app.register_blueprint(user_bp)

    # News-module blueprint
    from .resources.news import news_bp
    app.register_blueprint(news_bp)

    # Statistics-module blueprint
    from .resources.statistic import statistic_bp
    app.register_blueprint(statistic_bp)

    return app
for k, v in checkpoint_state['model'].items() if k in model_dict } model_dict.update(pretrained_dict) model.load_state_dict(model_dict) # other states from checkpoint -- optimizer, scheduler, loss, epoch initial_epoch = checkpoint_state['epoch'] + 1 optimizer.load_state_dict(checkpoint_state['optimizer']) scheduler.load_state_dict(checkpoint_state['scheduler']) criterion.load_state_dict(checkpoint_state['criterion']) # initialize logger log_dir = os.path.join( args.base_directory, "logs", f'{hp.version}-{datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")}') Logger.initialize(log_dir, args.flush_seconds) text_logger = create_logger(log_dir) # training loop best_eval = float('inf') for epoch in range(initial_epoch, hp.epochs): train(args.logging_start, epoch, train_data, model, criterion, optimizer) if hp.learning_rate_decay_start - hp.learning_rate_decay_each < epoch * len( train_data): scheduler.step() eval_loss = evaluate(epoch, eval_data, model, criterion) if (epoch + 1) % hp.checkpoint_each_epochs == 0: # save checkpoint together with hyper-parameters, optimizer and scheduler states checkpoint_file = f'{checkpoint_dir}/{hp.version}_loss-{epoch}-{eval_loss:2.3f}' state_dict = { 'epoch': epoch,
def create_app(config, enable_config_file=False):
    """
    Create the application.

    :param config: configuration object
    :param enable_config_file: whether a config file named in the runtime
        environment may override the already-loaded configuration
    :return: the application
    """
    app = create_flask_app(config, enable_config_file)

    # Allow cross-origin requests, including credentials (cookies)
    CORS(app, supports_credentials=True)

    # Rate limiter
    from utils.limiter import limiter
    limiter.init_app(app)

    # Logging
    from utils.logging import create_logger
    create_logger(app)

    # Custom URL converters
    from utils.converters import register_converters
    register_converters(app)

    # redis
    # The legacy redis interface is kept for now
    from utils.redis_client import create_redis_clients
    app.redis_cli = create_redis_clients(app)

    # Sentinel-managed master/slave connections
    from redis.sentinel import Sentinel
    _sentinel = Sentinel(app.config['REDIS_SENTINELS'])
    app.redis_master = _sentinel.master_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])
    app.redis_slave = _sentinel.slave_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])

    # Redis cluster
    from rediscluster import StrictRedisCluster
    app.redis_cluster = StrictRedisCluster(
        startup_nodes=app.config['REDIS_CLUSTER'])

    # rpc (disabled)
    # app.rpc_reco = grpc.insecure_channel(app.config['RPC'].RECOMMEND)

    # Elasticsearch
    app.es = Elasticsearch(
        app.config['ES'],
        # sniff before doing anything
        sniff_on_start=True,
        # refresh nodes after a node fails to respond
        sniff_on_connection_fail=True,
        # and also every 60 seconds
        sniffer_timeout=60)

    # socket.io (disabled)
    # app.sio = socketio.KombuManager(app.config['RABBITMQ'], write_only=True)

    # Kafka producer feeding the recommendation system
    app.kafka_producer = KafkaProducer(
        bootstrap_servers=app.config['KAFKA_SERVERS'])

    # MySQL database connection initialisation
    from models import db
    db.init_app(app)

    # Deprecated: error handlers (kept for reference)
    # from utils.error_handlers import handle_redis_error, handler_mysql_error
    # app.register_error_handler(RedisError, handle_redis_error)
    # app.register_error_handler(SQLAlchemyError, handler_mysql_error)

    # Request hook: admin-side (MIS) JWT authentication
    from utils.middlewares import mis_jwt_authentication
    app.before_request(mis_jwt_authentication)

    # User-management blueprint
    from .resources.user import user_bp
    app.register_blueprint(user_bp)

    # Information-management blueprint
    from .resources.information import information_bp
    app.register_blueprint(information_bp)

    # Data-statistics blueprint
    from .resources.statistics import statistics_bp
    app.register_blueprint(statistics_bp)

    # System-management blueprint
    from .resources.system import system_bp
    app.register_blueprint(system_bp)

    # Recommendation-system blueprint
    from .resources.recommend import recommend_bp
    app.register_blueprint(recommend_bp)

    return app
def create_app(env_type, enable_config_file=False):
    """
    Create the flask application and initialise its components.

    :param env_type: environment type
    :param enable_config_file: whether a config file named in the runtime
        environment may override the already-loaded configuration
    :return: flask application
    """
    app = create_flask_app(env_type, enable_config_file)

    # Register the custom regex URL converters
    from utils.converters import register_converters
    register_converters(app)

    # Create the redis sentinel
    from redis.sentinel import Sentinel
    _sentinel = Sentinel(app.config['REDIS_SENTINELS'])
    # Master/slave redis connection objects obtained through the sentinel
    app.redis_master = _sentinel.master_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])
    app.redis_slave = _sentinel.slave_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])

    # Create the redis cluster client
    from rediscluster import StrictRedisCluster
    app.redis_cluster = StrictRedisCluster(
        startup_nodes=app.config['REDIS_CLUSTER'])

    # Configure the MySQL database
    from models import db
    db.init_app(app)

    # Logging
    from utils.logging import create_logger
    create_logger(app)

    # Rate limiter
    from utils.limiter import limiter as lmt
    lmt.init_app(app)

    # Snowflake distributed-ID worker
    from utils.snowflake.id_worker import IdWorker
    app.id_worker = IdWorker(app.config['DATACENTER_ID'],
                             app.config['WORKER_ID'],
                             app.config['SEQUENCE'])

    # Executor for scheduled jobs
    from apscheduler.executors.pool import ThreadPoolExecutor
    executor = ThreadPoolExecutor()

    # Background scheduler
    from apscheduler.schedulers.background import BackgroundScheduler
    app.scheduler = BackgroundScheduler(executors={'default': executor})

    from scheduler.cache_schedule import fix_statistic
    # Scheduled job: sync/fix the statistics data, normally daily at 03:00
    # app.scheduler.add_job(fix_statistic, 'cron', hour=3)
    # 'date' trigger with no run_date fires once, immediately (testing)
    app.scheduler.add_job(fix_statistic, 'date', args=[app])
    # Start the scheduler
    app.scheduler.start()

    # grpc channel to the recommendation service
    app.channel = grpc.insecure_channel(app.config['RPC'].RECOMMEND)

    # socket.io message-queue manager (expects the flask app in production
    # mode); stores the push messages in the message queue
    import socketio
    app.siomgr = socketio.KombuManager(app.config['RABBIT_MQ'])

    # Elasticsearch client
    from elasticsearch5 import Elasticsearch
    app.es = Elasticsearch(
        app.config['ES_HOST'],
        # sniff the es cluster servers before starting
        sniff_on_start=True,
        # refresh the es node list when a node connection fails
        sniff_on_connection_fail=True,
        # and also every 60 seconds
        sniffer_timeout=60
    )

    # Request hook: JWT authentication
    from utils.middlewares import jwt_authentication
    app.before_request(jwt_authentication)

    # User-module blueprint
    from .resources.user import user_bp
    app.register_blueprint(user_bp)

    # News-module blueprint
    from .resources.news import news_bp
    app.register_blueprint(news_bp)

    # Search-module blueprint
    from .resources.search import search_bp
    app.register_blueprint(search_bp)

    return app
def create_app(config, enable_config_file=False):
    """
    Create the application.

    :param config: configuration object
    :param enable_config_file: whether a config file named in the runtime
        environment may override the already-loaded configuration
    :return: the application
    """
    app = create_flask_app(config, enable_config_file)

    # Snowflake distributed-ID worker
    from utils.snowflake.id_worker import IdWorker
    app.id_worker = IdWorker(app.config['DATACENTER_ID'],
                             app.config['WORKER_ID'],
                             app.config['SEQUENCE'])

    # Rate limiter
    from utils.limiter import limiter as lmt
    lmt.init_app(app)

    # Logging
    from utils.logging import create_logger
    create_logger(app)

    # Custom URL converters
    from utils.converters import register_converters
    register_converters(app)

    # Sentinel-managed master/slave connections
    from redis.sentinel import Sentinel
    _sentinel = Sentinel(app.config['REDIS_SENTINELS'])
    app.redis_master = _sentinel.master_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])
    app.redis_slave = _sentinel.slave_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])

    # Redis cluster
    from rediscluster import StrictRedisCluster
    app.redis_cluster = StrictRedisCluster(
        startup_nodes=app.config['REDIS_CLUSTER'])

    # gRPC channel to the recommendation service
    app.rpc_reco = grpc.insecure_channel(app.config['RPC'].RECOMMEND)

    # Elasticsearch
    app.es = Elasticsearch(
        app.config['ES'],
        # sniff before doing anything
        sniff_on_start=True,
        # refresh nodes after a node fails to respond
        sniff_on_connection_fail=True,
        # and also every 60 seconds
        sniffer_timeout=60)

    # socket.io (disabled)
    # app.sio = socketio.KombuManager(app.config['RABBITMQ'], write_only=True)

    # MySQL database connection initialisation
    from models import db
    db.init_app(app)

    # Request hook: JWT authentication
    from utils.middlewares import jwt_authentication
    app.before_request(jwt_authentication)

    # User-module blueprint
    from .resources.user import user_bp
    app.register_blueprint(user_bp)

    # News-module blueprint
    from .resources.news import news_bp
    app.register_blueprint(news_bp)

    # Notice-module blueprint
    from .resources.notice import notice_bp
    app.register_blueprint(notice_bp)

    # Search-module blueprint
    from .resources.search import search_bp
    app.register_blueprint(search_bp)

    # APScheduler scheduler object, stored on the flask app so views can add
    # new scheduled jobs at runtime via current_app.scheduler.add_job().
    # NOTE(review): BackgroundScheduler / ThreadPoolExecutor are not imported
    # in this function — presumably imported at module level; confirm.
    executors = {
        'default': ThreadPoolExecutor(20),
    }
    app.scheduler = BackgroundScheduler(executors=executors)

    # Jobs managed by the scheduler fall in two groups:
    # 1) Statically known jobs, e.g. fixing the redis statistics data —
    #    defined here with add_job:
    # app.scheduler.add_job()

    # Scheduled job that fixes the statistics data
    from .schedulers.statistic import fix_statistics
    # Normally runs daily at 03:00; `args` is forwarded to the job function
    # app.scheduler.add_job(fix_statistics, 'cron', hour=3, args=[app])
    # For easy testing, run it immediately instead:
    app.scheduler.add_job(fix_statistics, 'date', args=[app])

    # 2) Dynamic jobs created while flask is running, added from view
    #    functions via current_app.scheduler.add_job.
    app.scheduler.start()  # non-blocking; timing runs on a background thread

    return app
import asyncio from concurrent.futures.thread import ThreadPoolExecutor import aiohttp from aiohttp import web from api.db.db_helpers import check_exist from api.db.tables import images from api.processing import check_image, decode_value from utils.logging import create_logger logger = create_logger(__name__) async def load_image_content(executor: ThreadPoolExecutor, session: aiohttp.ClientSession, image_urls: list, image_text: str) -> tuple: """ Function that read content of image and run blocking sync processing of image :param executor: ThreadPoolExecutor for run sync code in threads :param session: Client Session for every :param image_urls: List with image urls :param image_text: Text for checking on image :return: result (True if text was found on the image else False), url (Image url) """ loop = asyncio.get_running_loop() for url in image_urls: async with session.get(url) as response: content = await response.read() result = await loop.run_in_executor(executor, check_image, content, image_text)
def create_app(config, enable_config_file=False):
    """
    Create the application.

    :param config: configuration object
    :param enable_config_file: whether a config file named in the runtime
        environment may override the already-loaded configuration
    :return: the application
    """
    app = create_flask_app(config, enable_config_file)

    # Snowflake distributed-ID worker
    from utils.snowflake.id_worker import IdWorker
    app.id_worker = IdWorker(app.config['DATACENTER_ID'],
                             app.config['WORKER_ID'],
                             app.config['SEQUENCE'])

    # Rate limiter
    from utils.limiter import limiter as lmt
    lmt.init_app(app)

    # Logging
    from utils.logging import create_logger
    create_logger(app)

    # Custom URL converters
    from utils.converters import register_converters
    register_converters(app)

    # Sentinel-managed master/slave connections
    from redis.sentinel import Sentinel
    _sentinel = Sentinel(app.config['REDIS_SENTINELS'])
    app.redis_master = _sentinel.master_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])
    app.redis_slave = _sentinel.slave_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])

    # Redis cluster
    from rediscluster import StrictRedisCluster
    app.redis_cluster = StrictRedisCluster(
        startup_nodes=app.config['REDIS_CLUSTER'])

    # gRPC channel to the recommendation service
    app.rpc_reco_channel = grpc.insecure_channel(app.config['RPC'].RECOMMEND)
    app.rpc_reco = app.rpc_reco_channel  # second reference to the same channel

    # Elasticsearch
    app.es = Elasticsearch(
        app.config['ES'],
        # sniff before doing anything
        sniff_on_start=True,
        # refresh nodes after a node fails to respond
        sniff_on_connection_fail=True,
        # and also every 60 seconds
        sniffer_timeout=60)

    # socket.io
    # Through the sio manager, push-notification tasks can be published; the
    # socketio server pops them from rabbitmq and pushes the messages
    app.sio_mgr = socketio.KombuManager(app.config['RABBITMQ'],
                                        write_only=True)

    # MySQL database connection initialisation
    from models import db
    db.init_app(app)

    # APScheduler background scheduler
    executors = {'default': ThreadPoolExecutor(10)}
    app.scheduler = BackgroundScheduler(executors=executors)
    # Add the "static" scheduled job: fix the statistics data
    from .schedule.statistic import fix_statistics
    # app.scheduler.add_job(fix_statistics, 'date', args=[app])
    app.scheduler.add_job(fix_statistics, 'cron', hour=3, args=[app])
    # Start the scheduler
    app.scheduler.start()

    # Deprecated: error handlers — ineffective with flask-restful
    # from utils.error_handlers import handle_redis_error, handler_mysql_error
    # app.register_error_handler(RedisError, handle_redis_error)
    # app.register_error_handler(SQLAlchemyError, handler_mysql_error)

    # Request hook: JWT authentication
    from utils.middlewares import jwt_authentication
    app.before_request(jwt_authentication)

    # User-module blueprint
    from .resources.user import user_bp
    app.register_blueprint(user_bp)

    # News-module blueprint
    from .resources.news import news_bp
    app.register_blueprint(news_bp)

    # Notice-module blueprint
    from .resources.notice import notice_bp
    app.register_blueprint(notice_bp)

    # Search-module blueprint
    from .resources.search import search_bp
    app.register_blueprint(search_bp)

    return app
def create_app(config, enable_config_file=False):
    """
    Create the application.

    :param config: configuration object
    :param enable_config_file: whether a config file named in the runtime
        environment may override the already-loaded configuration
    :return: the application
    """
    app = create_flask_app(config, enable_config_file)

    # Snowflake distributed-ID worker
    from utils.snowflake.id_worker import IdWorker
    app.id_worker = IdWorker(app.config['DATACENTER_ID'],
                             app.config['WORKER_ID'],
                             app.config['SEQUENCE'])

    # Rate limiter
    from utils.limiter import limiter as lmt
    lmt.init_app(app)

    # Logging
    from utils.logging import create_logger
    create_logger(app)

    # Custom URL converters
    from utils.converters import register_converters
    register_converters(app)

    # Sentinel-managed master/slave connections
    from redis.sentinel import Sentinel
    _sentinel = Sentinel(app.config['REDIS_SENTINELS'])
    app.redis_master = _sentinel.master_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])
    app.redis_slave = _sentinel.slave_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])

    # Redis cluster
    from rediscluster import StrictRedisCluster
    app.redis_cluster = StrictRedisCluster(
        startup_nodes=app.config['REDIS_CLUSTER'])

    # gRPC channel to the recommendation service
    app.rpc_reco_channel = grpc.insecure_channel(app.config['RPC'].RECOMMEND)

    # Elasticsearch
    app.es = Elasticsearch(
        app.config['ES'],
        # sniff before doing anything
        sniff_on_start=True,
        # refresh nodes after a node fails to respond
        sniff_on_connection_fail=True,
        # and also every 60 seconds
        sniffer_timeout=60)

    # socket.io (disabled)
    # app.sio = socketio.KombuManager(app.config['RABBITMQ'], write_only=True)

    # MySQL database connection initialisation
    from models import db
    db.init_app(app)

    # Request hook: JWT authorization
    from utils.middleware import jwt_authorization
    app.before_request(jwt_authorization)

    # APScheduler background jobs.
    # (cleanup: removed the unused `from apscheduler.triggers import date,
    # interval, cron` import — only string trigger aliases are used.)
    from apscheduler.schedulers.background import BackgroundScheduler
    from apscheduler.executors.pool import ThreadPoolExecutor
    from toutiao.schedule.statistics import fix_statistics

    # 1. Executor: jobs run on a thread pool, at most 10 concurrent threads.
    executors = {
        "default": ThreadPoolExecutor(max_workers=10)
    }
    # 2. Scheduler, stored on the app so views can add dynamic jobs via
    #    current_app.scheduler.add_job(...).
    scheduler = BackgroundScheduler(executors=executors)
    app.scheduler = scheduler
    # 3. Static job: fix the statistics data.
    #    Production schedule (daily at 04:00) would be:
    #    app.scheduler.add_job(func=fix_statistics, trigger="cron", hour=4, args=[app])
    #    The 'date' trigger with no run_date fires once, immediately (testing).
    app.scheduler.add_job(func=fix_statistics, trigger="date", args=[app])
    # 4. Start the scheduler (non-blocking).
    app.scheduler.start()

    # User-module blueprint
    from .resources.user import user_bp
    app.register_blueprint(user_bp)

    # News-module blueprint
    from .resources.news import news_bp
    app.register_blueprint(news_bp)

    # Notice-module blueprint
    from .resources.notice import notice_bp
    app.register_blueprint(notice_bp)

    # Search-module blueprint
    from .resources.search import search_bp
    app.register_blueprint(search_bp)

    return app
def create_app(config, enable_config_file=False):
    """
    Create the application.

    :param config: configuration object
    :param enable_config_file: whether a config file named in the runtime
        environment may override the already-loaded configuration
    :return: the application
    """
    app = create_flask_app(config, enable_config_file)

    # Snowflake distributed-ID worker
    from utils.snowflake.id_worker import IdWorker
    app.id_worker = IdWorker(app.config['DATACENTER_ID'],
                             app.config['WORKER_ID'],
                             app.config['SEQUENCE'])
    # To generate a distributed id inside a view:
    # id = current_app.id_worker.get_id()

    # Rate limiter
    from utils.limiter import limiter as lmt
    lmt.init_app(app)

    # Logging
    from utils.logging import create_logger
    create_logger(app)

    # Custom URL converters
    from utils.converters import register_converters
    register_converters(app)

    # Sentinel-managed master/slave connections
    from redis.sentinel import Sentinel
    _sentinel = Sentinel(app.config['REDIS_SENTINELS'])
    app.redis_master = _sentinel.master_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])
    app.redis_slave = _sentinel.slave_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])

    # Redis cluster
    from rediscluster import StrictRedisCluster
    app.redis_cluster = StrictRedisCluster(
        startup_nodes=app.config['REDIS_CLUSTER'])
    # Usage from views:
    # current_app.redis_master.set()
    # current_app.redis_cluster.get()

    # gRPC channel to the recommendation service
    app.rpc_reco = grpc.insecure_channel(app.config['RPC'].RECOMMEND)

    # Elasticsearch
    app.es = Elasticsearch(
        app.config['ES'],
        # sniff before doing anything
        sniff_on_start=True,
        # refresh nodes after a node fails to respond
        sniff_on_connection_fail=True,
        # and also every 60 seconds
        sniffer_timeout=60)

    # socket.io (disabled)
    # app.sio = socketio.KombuManager(app.config['RABBITMQ'], write_only=True)

    # MySQL database connection initialisation
    from models import db
    db.init_app(app)
    # Equivalent patterns, kept for reference:
    # db = SQLAlchmey(app)
    # db = SQLAlchemy()
    # db.init_app(app_

    # Scheduler object, stored on the flask app so that views can add new
    # scheduled jobs at runtime via current_app.scheduler.add_job().
    # NOTE(review): BackgroundScheduler / ThreadPoolExecutor are not imported
    # in this function — presumably imported at module level; confirm.
    executors = {
        'default': ThreadPoolExecutor(10),
    }
    app.scheduler = BackgroundScheduler(executors=executors)

    # Static jobs, unrelated to view execution, are fixed here at start-up.
    from .schedule import statistic
    # Run daily at 03:00
    app.scheduler.add_job(statistic.fix_statistics, 'cron', hour=3, args=[app])
    # For easy testing, run immediately instead:
    # app.scheduler.add_job(statistic.fix_statistics, 'date', args=[app])
    # app.scheduler.add_job()
    # app.scheduler.add_job()
    # app.scheduler.add_job()
    app.scheduler.start()

    # Request hook: JWT authentication
    from utils.middlewares import jwt_authentication
    app.before_request(jwt_authentication)

    # User-module blueprint
    from .resources.user import user_bp
    app.register_blueprint(user_bp)

    # News-module blueprint
    from .resources.news import news_bp
    app.register_blueprint(news_bp)

    # Notice-module blueprint
    from .resources.notice import notice_bp
    app.register_blueprint(notice_bp)

    # Search-module blueprint
    from .resources.search import search_bp
    app.register_blueprint(search_bp)

    return app