def do_copula_4_scheme(scheme_id):
    """
    Run the copula stress test for the fund combination of the given scheme.

    Reads the scheme's sub-fund list from ``scheme_fund_pct``, runs a
    Clayton-copula max-drawdown simulation over the trailing year, and stores
    a 20-bin histogram of the simulated drawdowns as JSON in redis under the
    key ``scheme_<scheme_id>_copula``.

    :param scheme_id: scheme identifier used in ``scheme_fund_pct``
    :return: None (results are written to redis)
    """
    sql_str = "SELECT wind_code, invest_scale FROM scheme_fund_pct where scheme_id=%(scheme_id)s"
    engine = get_db_engine()
    fund_pct_df = pd.read_sql(sql_str, engine, params={'scheme_id': str(scheme_id)})
    # simulate over the trailing 365 days ending today
    date_to = date.today()
    date_to_str = date_to.strftime(STR_FORMAT_DATE)
    date_from = date_to - timedelta(days=365)
    date_from_str = date_from.strftime(STR_FORMAT_DATE)
    simulate_count = STRESS_TESTING_SIMULATE_COUNT_COPULA
    r = get_redis()
    wind_code_list = list(fund_pct_df['wind_code'])  # e.g. ['XT1410445.XT', 'J11039.OF']
    wind_code_count = len(wind_code_list)
    # %s instead of %d: scheme_id may be passed as a string by callers
    logger.info('do_copula for %s on wind_code_p with %s', scheme_id, wind_code_list)
    if wind_code_count <= 0:
        logger.warning('scheme %s has no sub fund list', scheme_id)
        return
    st = StressTest('Clayton')
    # equal weights across all sub funds
    weighted_list = np.ones(wind_code_count)
    max_dd_list = st.get_max_drawdown(wind_code_list, date_from_str, date_to_str,
                                      weighted_list, simulate_count)
    if max_dd_list is None or len(max_dd_list) == 0:
        logger.error('scheme %s has no copula test data. sub fund list: %s',
                     scheme_id, wind_code_list)
        return
    # 20-bin histogram of the simulated max drawdowns; store counts (y) and
    # bin edges (x, rounded to 3 decimals) for the front end to plot
    y, x, patches = plt.hist(max_dd_list, 20)
    y = list(map(int, y))
    x = list(map(float, ["%.3f" % i for i in x]))
    key = 'scheme_%s_%s' % (scheme_id, 'copula')
    val_str = json.dumps({"x": x, "y": y})
    logger.debug('%s has been completed\n%s', key, val_str)
    r.set(key, val_str)
def send_log():
    """Relay log records from the redis 'stream' channel to socket.io clients.

    Subscribes to the ``stream`` pub/sub channel on redis (10.0.5.107, db 5)
    and forwards every published payload, decoded as UTF-8 JSON, to the
    ``/stream`` socket.io namespace.  Runs forever; never returns.
    """
    redis_client = get_redis(host='10.0.5.107', db=5)
    channel = redis_client.pubsub()
    channel.subscribe('stream')
    while True:
        for message in channel.listen():
            payload = message['data']
            if payload == 1:
                # integer 1 is the subscription confirmation — nothing to forward
                continue
            try:
                payload = payload.decode('utf-8')
                socketio.emit('my_response',
                              {'log': json.loads(payload), 'states': 'connect'},
                              namespace='/stream')
            except TypeError:
                # payload could not be decoded/parsed as expected — dump it
                print(payload)
def do_fund_multi_factor_by_wind_code_list(wind_code_list):
    """
    Run multi-factor attribution analysis for each fund in the list.

    For every fund code, fetches its historical factor exposure and, when
    available, stores it as JSON in redis.

    :param wind_code_list: list of fund wind codes to analyse
    :return: None (results are written to redis)
    """
    r = get_redis()
    for n, wind_code in enumerate(wind_code_list):
        # BUG FIX: was logger.info('') — log useful progress instead
        logger.info('multi factor %d/%d on %s', n + 1, len(wind_code_list), wind_code)
        fund_exposure_his_df = get_fund_exposure_his(wind_code)
        if fund_exposure_his_df is not None:
            # NOTE(review): key reads '<code>s:multi_factor' — the 's' before
            # the colon may be unintended; confirm against the code that reads
            # this key before changing it.
            key = wind_code + 's:multi_factor'
            fund_exposure_str = json.dumps(fund_exposure_his_df.to_dict())
            r.set(key, fund_exposure_str)
            logger.debug('multi factor on %s with key: %s value:\n%s',
                         wind_code, key, fund_exposure_str)
def __call__(self, app, host, port, use_debugger, use_reloader):
    """Start the app under the Socket.IO server (replaces default runserver).

    When the debugger/reloader flags are not given, they fall back to
    ``app.debug`` (the debugger finally defaults to True).  The user fof
    cache (local redis db 3) is flushed before the server starts.
    """
    if use_debugger is None:
        # fall back to app.debug, and to True when that is also unset
        use_debugger = app.debug if app.debug is not None else True
    if use_reloader is None:
        use_reloader = app.debug
    cache = get_redis(host='127.0.0.1', db=3)
    cache.flushdb()
    logger.info("用户fof缓存已清空")
    SocketIo.run(app, host=host, port=port, debug=use_debugger,
                 use_reloader=use_reloader, **self.server_options)
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2016 by Huang Dong
:mail: [email protected].
:license: Apache 2.0, see LICENSE for more details.
"""
from celery import Celery
from celery.schedules import crontab
import os
from fof_app import create_app
from fof_app.tasks import log, update_fund, update_index, update_stock, stress_testing, update_future_daily, update_future_info
from config_fh import get_redis
from r_log import RedisHandler
import logging

logger = logging.getLogger()
# mirror every log record to the redis 'stream' channel (consumed by send_log)
r_handler = RedisHandler(channel='stream', redis_client=get_redis(host='10.0.5.107', db=5))
logger.addHandler(r_handler)


def create_celery(app):
    """Build a Celery instance bound to the given Flask app.

    Broker/backend come from the app config; every task is wrapped so it
    executes inside the Flask application context.

    :param app: Flask application
    :return: the configured Celery instance
    """
    celery = Celery(app.import_name,
                    backend=app.config['CELERY_RESULT_BACKEND'],
                    broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            # run the task inside the Flask app context
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    # BUG FIX: the instance was built but never returned, so callers got None
    return celery
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2016 by Huang Dong
:mail: [email protected].
:license: Apache 2.0, see LICENSE for more details.
"""
from fof_app import create_app
import os
from config_fh import get_redis
import logging

logger = logging.getLogger()
# pick the config class from APP_ENV (e.g. 'prod' -> ProdConfig)
env = os.environ.get('APP_ENV', 'prod')
wsgi_app = create_app('fof_app.config.%sConfig' % env.capitalize())
# clear the user fof cache (redis db 3) on startup
r = get_redis(db=3)
r.flushdb()
# BUG FIX: this log line used to sit after wsgi_app.run(), which blocks until
# the server exits, so the "cache cleared" message was never logged at startup
logger.info("用户fof缓存已清空")

if __name__ == '__main__':
    wsgi_app.run()
# file_path = plot_fof_copula(wind_code_list, weight_list, start_date, end_date, simulate_count)
# Load the latest sub-fund allocation for every parent fund (only the most
# recent date_adj row per wind_code_p).
sql_str = """select id, ffp.wind_code_p, wind_code, date_adj, invest_scale
    from fof_fund_pct ffp,
    (
    select wind_code_p, max(date_adj) date_latest from fof_fund_pct group by wind_code_p
    ) ff_date_latest
    where ffp.wind_code_p = ff_date_latest.wind_code_p
    and ffp.date_adj = ff_date_latest.date_latest"""
engine = get_db_engine()
fof_fund_df = pd.read_sql(sql_str, engine)
# group sub funds by parent fund code
wind_code_fund_dic = dict(list(fof_fund_df.groupby('wind_code_p')))
date_to = date.today() - timedelta(days=1)
date_to_str = '2016-12-31'  # date_to.strftime(STR_FORMAT_DATE)
# NOTE(review): timedelta() is zero days, so date_from == date_to; harmless
# while the hard-coded date strings below are in effect, but looks unintended.
date_from = date_to - timedelta()
date_from_str = '2016-01-01'  # date_from.strftime(STR_FORMAT_DATE)
simulate_count = 1000
r = get_redis()
for wind_code_p, fof_fund_sub_df in wind_code_fund_dic.items():
    wind_code_list = list(fof_fund_sub_df['wind_code'])
    wind_code_count = len(wind_code_list)
    if wind_code_count <= 0:
        continue
    st = stress_test('Clayton')
    # equal weights across the parent fund's sub funds
    weighted_list = np.ones(wind_code_count)
    # BUG FIX: was called with a hard-coded two-fund list while weighted_list
    # has wind_code_count entries — use the actual sub-fund list instead
    max_dd_list = st.get_max_drawdown(wind_code_list, date_from_str, date_to_str,
                                      weighted_list, simulate_count)
    max_dd_list_str = json.dumps(max_dd_list)
    r.set(wind_code_p, max_dd_list_str)
print('finished')
def do_copula(wind_code_list=None):
    """
    Run the Clayton-copula stress test for FoF parent funds.

    Loads the latest sub-fund allocation of every parent fund, simulates max
    drawdowns over the trailing year, and stores a 10-bin histogram as JSON
    in redis under ``<wind_code_p>:copula``.

    :param wind_code_list: optional list of parent fund codes; when given and
        non-empty, only those parents are processed (None/empty means all)
    :return: None (results are written to redis)
    """
    sql_str = """select id, ffp.wind_code_p, ffp.wind_code_s, wind_code, date_adj, invest_scale
        from fof_fund_pct ffp,
        (
        select wind_code_p, max(date_adj) date_latest from fof_fund_pct group by wind_code_p
        ) ff_date_latest,
        fund_essential_info ffm
        where ffp.wind_code_p = ff_date_latest.wind_code_p
        and ffp.wind_code_s = ffm.wind_code_s
        and ffp.date_adj = ff_date_latest.date_latest"""
    engine = get_db_engine()
    fof_fund_df = pd.read_sql(sql_str, engine)
    # group sub funds by parent fund code
    wind_code_fund_dic = dict(list(fof_fund_df.groupby('wind_code_p')))
    # simulate over the trailing 365 days ending today
    date_to = date.today()
    date_to_str = date_to.strftime(STR_FORMAT_DATE)
    date_from = date_to - timedelta(days=365)
    date_from_str = date_from.strftime(STR_FORMAT_DATE)
    simulate_count = STRESS_TESTING_SIMULATE_COUNT_COPULA
    r = get_redis()
    # BUG FIX: default was a mutable [] — None is the "process all" marker now
    if wind_code_list is not None and len(wind_code_list) > 0:
        wind_code_set = set(wind_code_list)
    else:
        wind_code_set = None
    for wind_code_p, fof_fund_sub_df in wind_code_fund_dic.items():
        if wind_code_set is not None and wind_code_p not in wind_code_set:
            continue
        # renamed local so it no longer shadows the parameter
        sub_wind_code_list = list(fof_fund_sub_df['wind_code'])  # e.g. ['XT1410445.XT', 'J11039.OF']
        wind_code_count = len(sub_wind_code_list)
        logger.info('do_copula on wind_code_p with %s', sub_wind_code_list)
        if wind_code_count <= 0:
            continue
        st = StressTest('Clayton')
        # equal weights across the parent fund's sub funds
        weighted_list = np.ones(wind_code_count)
        max_dd_list = st.get_max_drawdown(sub_wind_code_list, date_from_str, date_to_str,
                                          weighted_list, simulate_count)
        if max_dd_list is None or len(max_dd_list) == 0:
            logger.error('%s has no copula test data. sub fund list: %s',
                         wind_code_p, sub_wind_code_list)
            continue
        # 10-bin histogram of the simulated max drawdowns; store counts (y)
        # and bin edges (x, rounded to 3 decimals) for the front end to plot
        y, x, patches = plt.hist(max_dd_list, 10)
        y = list(map(int, y))
        x = list(map(float, ["%.3f" % i for i in x]))
        key = '%s:%s' % (wind_code_p, 'copula')
        logger.debug('%s has been completed', key)
        r.set(key, json.dumps({"x": x, "y": y}))
def do_fhs_garch_4_scheme(scheme_id):
    """
    Run the FHS-GARCH stress test for the fund combination of the given scheme.

    Reads the scheme's sub-fund list from ``scheme_fund_pct``, simulates each
    fund and the combination, and stores the quantile fan charts as JSON in
    redis under ``<wind_code>_fhs_garch`` and ``scheme_<scheme_id>_fhs_garch``.

    :param scheme_id: scheme identifier used in ``scheme_fund_pct``
    :return: None (results are written to redis)
    """
    # named parameter for consistency with do_copula_4_scheme
    sql_str = "SELECT wind_code, invest_scale FROM scheme_fund_pct where scheme_id=%(scheme_id)s"
    engine = get_db_engine()
    fund_pct_df = pd.read_sql(sql_str, engine, params={'scheme_id': str(scheme_id)})
    simulate_count = STRESS_TESTING_SIMULATE_COUNT_FHS_GARCH
    wind_code_list = list(fund_pct_df['wind_code'])  # e.g. ['XT1410445.XT', 'J11039.OF']
    wind_code_count = len(wind_code_list)
    if wind_code_count <= 0:
        logger.warning('scheme %s has no sub fund list', scheme_id)
        return
    # run the FHS-GARCH stress test
    simulate_comp_df, simulate_df_dic = fof_fhs_garch(wind_code_list, simulate_count)
    if simulate_comp_df is None:
        logger.error('scheme %s has no FHS GARCH test data. sub fund list: %s',
                     scheme_id, wind_code_list)
        return
    # %s instead of %d: scheme_id may be passed as a string by callers
    logger.info('do_fhs_garch for %s on wind_code_p with %s', scheme_id, wind_code_list)
    # store the per-fund stress test results in redis
    r = get_redis()
    for wind_code, simulate_df in simulate_df_dic.items():
        time_line = [i.strftime('%Y-%m-%d') for i in simulate_df.index]
        # quantile fan chart of the simulated paths
        df = simulate_df.T.quantile(QUANTILE_LIST).T
        result = {
            "time": time_line,
            "data": [{"name": i, "data": np.array(df[i]).tolist()} for i in df.columns],
            'show_count': simulate_count,
        }
        key = '%s_%s' % (wind_code, 'fhs_garch')
        logger.info('%s has been completed', key)
        r.set(key, json.dumps(result))
    # store the combined (scheme-level) result
    time_line = [i.strftime('%Y-%m-%d') for i in simulate_comp_df.index]
    df = simulate_comp_df.T.quantile(QUANTILE_LIST).T
    result = {
        "time": time_line,
        "data": [{"name": i, "data": np.array(df[i]).tolist()} for i in df.columns],
        'show_count': simulate_count,
    }
    key = 'scheme_%s_%s' % (scheme_id, 'fhs_garch')
    val_str = json.dumps(result)
    logger.info('%s has been completed\n%s', key, val_str)
    r.set(key, val_str)
def do_fhs_garch(wind_code_p_list=None):
    """
    Background job: run the FHS-GARCH stress test for FoF parent funds.

    For each parent fund the latest sub-fund allocation is loaded, each sub
    fund and the combination are simulated, and the quantile fan charts are
    stored as JSON in redis under ``<code>_fhs_garch`` keys (also for every
    mapped wind_code_s alias).

    :param wind_code_p_list: optional list of parent fund codes; when given
        and non-empty, only those parents are processed (None/empty means all)
    :return: None (results are written to redis)
    """
    sql_str = """select id, ffp.wind_code_p, ffp.wind_code_s, wind_code, date_adj, invest_scale
        from fof_fund_pct ffp,
        (
        select wind_code_p, max(date_adj) date_latest from fof_fund_pct group by wind_code_p
        ) ff_date_latest,
        fund_essential_info ffm
        where ffp.wind_code_p = ff_date_latest.wind_code_p
        and ffp.wind_code_s = ffm.wind_code_s
        and ffp.date_adj = ff_date_latest.date_latest"""
    engine = get_db_engine()
    fof_fund_df = pd.read_sql(sql_str, engine)
    # group sub funds by parent fund code
    wind_code_fund_dic = dict(list(fof_fund_df.groupby('wind_code_p')))
    # map each wind_code to all of its wind_code_s aliases
    wind_code_df_dic = dict(
        list(fof_fund_df[['wind_code', 'wind_code_s']].groupby('wind_code')))
    wind_code_s_dic = {wind_code: set(df['wind_code_s'].values)
                       for wind_code, df in wind_code_df_dic.items()}
    # NOTE: the original also computed date_from/date_to strings here, but
    # they were never used (fof_fhs_garch takes only codes + count) — removed
    simulate_count = STRESS_TESTING_SIMULATE_COUNT_FHS_GARCH
    r = get_redis()
    # BUG FIX: default was a mutable [] — None is the "process all" marker now
    if wind_code_p_list is not None and len(wind_code_p_list) > 0:
        wind_code_p_set = set(wind_code_p_list)
    else:
        wind_code_p_set = None
    for wind_code_p, fof_fund_sub_df in wind_code_fund_dic.items():
        if wind_code_p_set is not None and wind_code_p not in wind_code_p_set:
            logger.debug('%s 不在列表中 跳过', wind_code_p)
            continue
        wind_code_list = list(fof_fund_sub_df['wind_code'])  # e.g. ['XT1410445.XT', 'J11039.OF']
        wind_code_count = len(wind_code_list)
        if wind_code_count <= 0:
            continue
        simulate_comp_df, simulate_df_dic = fof_fhs_garch(wind_code_list,
                                                          simulate_count)
        if simulate_comp_df is None:
            logger.error('%s has no FHS GARCH test data. sub fund list: %s',
                         wind_code_p, wind_code_list)
            continue
        for wind_code, simulate_df in simulate_df_dic.items():
            time_line = [i.strftime('%Y-%m-%d') for i in simulate_df.index]
            # quantile fan chart of the simulated paths
            df = simulate_df.T.quantile(QUANTILE_LIST).T
            result = {
                "time": time_line,
                "data": [{"name": i, "data": np.array(df[i]).tolist()} for i in df.columns],
                'show_count': simulate_count,
            }
            # serialize once; the same payload is stored under every alias
            val_str = json.dumps(result)
            key = '%s_%s' % (wind_code, 'fhs_garch')
            logger.info('%s has been completed', key)
            r.set(key, val_str)
            for wind_code_s in wind_code_s_dic[wind_code]:
                key = '%s_%s' % (wind_code_s, 'fhs_garch')
                logger.info('%s has been completed', key)
                r.set(key, val_str)
        # store the combined (parent-fund-level) result
        time_line = [i.strftime('%Y-%m-%d') for i in simulate_comp_df.index]
        df = simulate_comp_df.T.quantile(QUANTILE_LIST).T
        result = {
            "time": time_line,
            "data": [{"name": i, "data": np.array(df[i]).tolist()} for i in df.columns],
            'show_count': simulate_count,
        }
        key = '%s_%s' % (wind_code_p, 'fhs_garch')
        logger.info('%s has been completed', key)
        r.set(key, json.dumps(result))