def update_schema(db_name, sub_name):
    """Refresh 'begin date', 'end date' and 'last update' in a db's schema.

    Applies to every database except factor_return (which has its own
    update_factor_return_schema).

    @db_name (str): database name, e.g. FACTOR (factor_return excluded)
    @sub_name (str): sub-database name inside db, e.g. VALUE, GROWTH
    """
    assert sub_name
    schema_path = os.path.join(DB_PATH_LIB[db_name], 'schema')
    schema = json2dict(schema_path)
    date_list = get_date_lists_in_table(DB_PATH_LIB[db_name], sub_name)
    entry = schema[sub_name]
    entry['begin date'] = date_list[0]
    entry['end date'] = date_list[-1]
    entry['last update'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    Logger.info("schema updated: {}".format(sub_name))
    dict2json(schema, schema_path, log=False)
    # Also dump a human-readable csv snapshot of the whole schema.
    snapshot = pd.DataFrame(schema).T
    col_names = [
        'aspect', 'type', 'begin date', 'end date', 'last update',
        'col_names', 'field', 'kwargs', 'explanation'
    ]
    snapshot = (snapshot.reindex(columns=col_names)
                .reset_index()
                .rename(columns={'index': 'indicator'})
                .sort_values(['type', 'aspect', 'field']))
    snapshot.to_csv(os.path.join(DB_PATH_LIB[db_name], 'schema.csv'),
                    index=False)
def update_calendar(start_date, end_date, log=False):
    """Reload calendar data from Wind.

    All existing rows are deleted and the table is rebuilt for the
    given interval.

    @start_date ("%Y-%m-%d"): begin date, must be the first day of a month
    @end_date ("%Y-%m-%d"): end date, must be the last day of a month
    @log (Bool): whether to print logs
    """
    Logger.info("Updating calendar ...", "green")
    # BUG FIX: removed dead statement `max_existed_date = get_trading_days`
    # which bound the function object without calling it and was never used.
    with SqliteProxy(log=log) as proxy:
        proxy.connect(os.path.join(DB_CALENDAR_PATH, "calendar.db"))
        proxy.execute("DELETE FROM calendar")  # full rebuild
        try:
            df = load_calendar_from_wind(start_date, end_date)
        except Exception:
            Logger.error("Error occurred when loading")
            raise ValueError
        try:
            proxy.write_from_dataframe(df, "calendar")
        except Exception:
            Logger.error(
                "Error occurred when writing dataframe into sqlite db")
            traceback.print_exc()
            raise ValueError
    if log:
        Logger.info("calendar was updated from {} to {}".format(
            start_date, end_date), color="green")
    Logger.info("------------------------------------------")
def sqlize_db_industry(subdb):
    """SQL-ize one industry sub-database stored as per-day json files.

    @subdb (str): sub-database name
    """
    db_path = DB_PATH_LIB['industry']
    subdb_path = os.path.join(db_path, subdb)
    trading_days = listdir_advanced(subdb_path, 'json', strip_suffix=True)
    with SqliteProxy(log=False) as proxy:
        # one .db file per year
        for year, dates in classify_dates_by_year(trading_days).items():
            path = os.path.join(db_path, '{}.db'.format(year))
            proxy.connect(path)
            if subdb not in proxy.list_tables:
                create_db_03(proxy, subdb)
            for date in dates:
                js = json2dict(
                    os.path.join(subdb_path, '{}.json'.format(date)))
                df = pd.DataFrame(list(js.items()),
                                  columns=['sec_id', 'industry'])
                df['date'] = date
                try:
                    # BUG FIX: previously wrote into the hard-coded table
                    # "A_SWL1" regardless of `subdb`; write into the table
                    # created above for this sub-database instead.
                    proxy.write_from_dataframe(df, subdb)
                except Exception:
                    Logger.error(
                        "Error occurred when sqlizing {} on {}.".format(
                            subdb, date))
                    traceback.print_exc()
def get_factor_return_daily(factor_return_name, trading_days=None):
    """Fetch daily returns of one factor_return from the local database.

    @factor_return_name (str): factor name
    @trading_days (['%Y-%m-%d']): list of dates; empty/None means all dates
    :return: DataFrame, index: date,
             columns: [sec_id, group01-group10, factor], or None on error
    """
    # avoid the mutable-default-argument pitfall (was `trading_days=[]`)
    if trading_days is None:
        trading_days = []
    # make sure the requested factor_return exists locally
    if factor_return_name not in get_schema("factor_return"):
        Logger.error(
            "{} is not in FACTOR_RETURN library".format(factor_return_name))
        return
    filepath = os.path.join(DB_FACTOR_RETURN_PATH,
                            "{}.csv".format(factor_return_name))
    df_info = open_csv_as_df(filepath, validate=True)
    if not trading_days:
        output = df_info.copy()
    else:
        output = df_info[df_info.date.isin(trading_days)]
        not_found_dates = set(trading_days) - set(output["date"].tolist())
        if not_found_dates:
            Logger.warn(
                "Following dates are invalid: {}".format(not_found_dates))
            return
    output = output.set_index(['date'])
    return output
def update_industry_to_json(industry, trading_days):
    """Refresh the latest-day industry json for `industry` and its schema.

    @industry (str): industry sub-database name
    @trading_days (['%Y-%m-%d']): dates; only the last one is used
    :raises ValueError: on empty input, load failure or write failure
    """
    if not trading_days:
        Logger.error("Empty trading_days for {}".format(industry))
        raise ValueError
    # BUG FIX: `date` was assigned inside the try block, so an empty
    # trading_days list made the except handler itself fail with NameError.
    date = trading_days[-1]
    try:
        index_code, loader = INDEX_LOADER_MAP[industry]
        info = loader(index_code, date, level=1)
    except Exception:
        Logger.error("Error occurred when loading {} on {}".format(industry,
                                                                   date))
        raise ValueError
    try:
        path = os.path.join(DB_INDUSTRY, '{}.json'.format(industry))
        copy_to = os.path.join(DB_INDUSTRY,
                               '{}_backup.json'.format(industry))
        shutil.copy(path, copy_to)  # keep a backup in case of corruption
        dict2json(info, path, log=False)
        Logger.info("{} on {} is updated successfully".format(industry,
                                                              date))
    except Exception:
        Logger.error("Error occurred when writing {} on {}".format(industry,
                                                                   date))
        raise ValueError
    # json files are different from sql, cannot use update_schema()
    # therefore update schema information explicitly
    try:
        now = datetime.now()
        schema = get_schema('industry')
        schema[industry]["begin date"] = ""
        schema[industry]["end date"] = now.strftime('%Y-%m-%d')
        schema[industry]['last update'] = now.strftime('%Y-%m-%d %H:%M:%S')
        save_schema(schema, 'industry')
        Logger.info("schema updated: {}".format(industry))
    except Exception:
        Logger.error(
            "Error occurred when updating schema of {}".format(industry))
        traceback.print_exc()
        raise ValueError
def get_secs_name_from_sql(sec_ids=None, index_code="A"):
    """Get latest-date stock names as {sec_id: sec_name}.

    @sec_ids (list): stock list; empty/None means every stock of
                     `index_code` on the latest date
    @index_code (str): table name, e.g. "A"
    :return: {sec_id: sec_name}
    """
    if sec_ids is None:  # avoid the mutable-default pitfall
        sec_ids = []
    last_date = get_schema('index_contents')[index_code]['end date']
    dbpath = os.path.join(DB_INDEX_CONTENTS, '{}.db'.format(last_date[:4]))
    with SqliteProxy(log=False) as proxy:
        proxy.connect(dbpath)
        if len(sec_ids) == 0:  # all stocks by default
            query = "SELECT sec_id, sec_name FROM [{}] WHERE date = '{}'".format(
                index_code, last_date)
        elif len(sec_ids) == 1:
            query = "SELECT sec_id, sec_name FROM [{}] WHERE date = '{}' AND sec_id = '{}'".format(
                index_code, last_date, sec_ids[0])
        else:
            query = "SELECT sec_id, sec_name FROM [{}] WHERE date = '{}' AND sec_id in {}".format(
                index_code, last_date, tuple(sec_ids))
        df = proxy.query_as_dataframe(query).set_index("sec_id")
        if len(df) == 0:
            Logger.warn("Empty result for query contents from {} on {}".format(
                index_code, last_date))
        # BUG FIX: with empty sec_ids the old comprehension iterated the
        # empty list and always returned {}, although the query fetched
        # every stock. Fall back to the full query result in that case.
        if not sec_ids:
            output = df["sec_name"].to_dict()
        else:
            output = {
                sec: df.at[sec, "sec_name"]
                for sec in sec_ids if sec in df.index
            }
    return output
def update_index_contents_to_sql(index_code, trading_days, override, log=False):
    """Write index contents for the given dates into the sqlite store.

    @index_code (str): index code (also used as the table name)
    @trading_days (['%Y-%m-%d']): dates to update
    @override (Bool): overwrite existing records for a date
    @log (Bool): whether to print logs
    :raises ValueError: on load failure, write failure or a None result
    """
    with SqliteProxy(log=log) as proxy:
        date_classfier = classify_dates_by_year(trading_days)
        for year, date_list in date_classfier.items():
            # one .db file per year
            path = os.path.join(DB_INDEX_CONTENTS, '{}.db'.format(year))
            proxy.connect(path)
            if index_code not in proxy.list_tables:
                create_table(proxy, "index_contents", index_code)
            # collect the dates already present in this year's table
            query = "SELECT DISTINCT(date) FROM [{}]".format(index_code)
            lookup = proxy.query_as_dataframe(query)
            lookup = set(lookup['date'].tolist())
            for date in date_list:
                if date in lookup and not override:
                    # date already stored and not overriding: skip it
                    if log:
                        Logger.warn("{} records on {} is existed.".format(
                            index_code, date))
                    continue
                try:
                    loader = LOADER_MAP[index_code]
                    df = loader(index_code, date)
                    df['date'] = date
                except Exception:
                    Logger.error("Error occurred when loading {} on {}".format(
                        index_code, date))
                    raise ValueError
                if df is not None:  # data fetched from Wind successfully
                    try:
                        if date in lookup and override:
                            # overriding: drop the old records first
                            proxy.execute(
                                "DELETE FROM [{}] WHERE date = '{}'".format(
                                    index_code, date))
                        proxy.write_from_dataframe(df, index_code)
                    except Exception:
                        Logger.error(
                            "Error occurred when writing {} on {}".format(
                                index_code, date))
                        traceback.print_exc()
                        raise ValueError
                    Logger.info("{} on {} is updated successfully".format(
                        index_code, date))
                else:  # failed to fetch data from Wind
                    Logger.error("Fail to fetch {} data on {}".format(
                        index_code, date))
                    raise ValueError
    update_schema('index_contents', index_code)
def get_secs_factor_on_multidays(factor, sec_ids=None, trading_days=None,
                                 log=False):
    """Read one factor over a range of dates from the local database.

    @factor (str): a single factor name
    @sec_ids (list): stocks to query; empty/None means the whole A market
    @trading_days (["%Y-%m-%d"]): list of dates
    @log (Bool): whether to print logs
    :return: {date: DataFrame}, each DataFrame with the factor as column
             and sec_id as a plain column
    """
    # avoid the mutable-default-argument pitfall (were `=[]`)
    sec_ids = [] if sec_ids is None else sec_ids
    trading_days = [] if trading_days is None else trading_days
    if log:
        Logger.info(
            "Reading {} from {} to {}".format(factor, trading_days[0],
                                              trading_days[-1]), "green")
    if factor not in get_schema("factor"):
        Logger.error("Unrecognized factor: {}".format(factor))
        raise ValueError
    if not isinstance(sec_ids, list):
        Logger.error("sec_ids must be list!")
        raise ValueError
    if not trading_days:
        Logger.error("Empty date")
        raise ValueError
    # hoisted out of the per-date loop: only depends on sec_ids
    if len(sec_ids) == 0:  # empty means the whole A market
        conds = ""
    elif len(sec_ids) == 1:
        conds = "AND sec_id = '{}'".format(sec_ids[0])
    else:
        conds = "AND sec_id IN {}".format(tuple(sec_ids))
    # a long-lived connection is more efficient, hence not reusing
    # get_secs_factor here
    with SqliteProxy(log=log) as proxy:
        output = {}
        for year, date_list in classify_dates_by_year(trading_days).items():
            path = os.path.join(DB_FACTOR, '{}.db'.format(year))
            proxy.connect(path)
            for date in date_list:
                query = "SELECT sec_id, {} FROM [{}] WHERE date = '{}' {}".format(
                    factor, factor, date, conds)
                try:
                    df = proxy.query_as_dataframe(query)
                except Exception:
                    Logger.error("Error occurred when reading {} at {}".format(
                        factor, date))
                    traceback.print_exc()
                    raise ValueError
                output[date] = df
    return output
def open_db_folder(db=""):
    """Open a database folder in the Windows file explorer.

    @db (str): database name; empty opens the root DB_PATH
    """
    if not db:
        path = DB_PATH
    elif db in DB_PATH_LIB:
        path = DB_PATH_LIB[db]
    else:
        # BUG FIX: previously fell through with `path` unbound and
        # crashed with NameError on the Popen call below.
        Logger.error("db not found: {}".format(db))
        return
    subprocess.Popen(r'explorer "{}"'.format(path))
def get_index_contents_on_multidays(index_code, trading_days=None, log=False):
    """Read the full contents of an index on multiple dates.

    @index_code (str): index code; currently supports
                       ['A', '000905.SH', '000300.SH', '000016.SH']
    @trading_days (['%Y-%m-%d']): list of dates
    @log (Bool): whether to print logs
    :return: {date: [sec_id]}
    """
    if trading_days is None:  # avoid the mutable-default pitfall
        trading_days = []
    if log:
        Logger.info(
            "Reading all {} records between trading_days ...".format(
                index_code), "green")
    if len(trading_days) == 0:
        Logger.error("Empty date")
        raise ValueError
    elif len(trading_days) == 1:
        date = trading_days[0]
        return {date: get_index_contents(index_code, date, log=False)}
    output = {}
    if index_code in IDXCONT_AS_SQL:
        with SqliteProxy(log=log) as proxy:
            for year, date_list in classify_dates_by_year(
                    trading_days).items():
                path = os.path.join(DB_INDEX_CONTENTS, '{}.db'.format(year))
                proxy.connect(path)
                # BUG FIX: a year bucket with exactly one date used to
                # render as "IN ('d',)" -- invalid SQL (trailing comma).
                # Use an equality test for the single-date case.
                if len(date_list) == 1:
                    cond = "date = '{}'".format(date_list[0])
                else:
                    cond = "date IN {}".format(tuple(date_list))
                query = "SELECT date, sec_id FROM [{}] WHERE {}".format(
                    index_code, cond)
                try:
                    df = proxy.query_as_dataframe(query)
                except Exception:
                    Logger.error(
                        "Empty result when reading {} from {} to {}".format(
                            index_code, trading_days[0], trading_days[-1]))
                    traceback.print_exc()
                    raise ValueError
                if len(df) == 0:
                    Logger.warn(
                        "Empty result when reading {} from {} to {}".format(
                            index_code, trading_days[0], trading_days[-1]))
                for date in date_list:
                    output[date] = df[df.date == date]['sec_id'].tolist()
    elif index_code in IDXCONT_AS_CSV:
        info = get_index_contents_from_csv(index_code)
        output = {date: info for date in trading_days}
    else:
        Logger.error("Unrecognized index code: {}".format(index_code))
        raise ValueError
    return output
def get_secs_factor(factor, sec_ids=None, date="", log=False):
    """Read one factor's values on a single date from the local db.

    @factor (str): a single factor name
    @sec_ids (list): stocks to query; empty/None means the whole A market
    @date ('%Y-%m-%d'): a single date
    @log (Bool): whether to print logs
    :return: DataFrame with the factor as column and sec_id as index
    """
    if sec_ids is None:  # avoid the mutable-default pitfall (was `=[]`)
        sec_ids = []
    if log:
        Logger.info("Reading {} at {}".format(factor, date), "green")
    if factor not in get_schema("factor"):
        Logger.error("Unrecognized factor: {}".format(factor))
        raise ValueError
    if not isinstance(sec_ids, list):
        Logger.error("sec_ids must be list!")
        raise ValueError
    if not date:
        Logger.error("Empty date")
        raise ValueError
    with SqliteProxy(log=log) as proxy:
        path = os.path.join(DB_FACTOR, '{}.db'.format(date[:4]))
        proxy.connect(path)
        if len(sec_ids) == 0:  # empty means the whole A market
            conds = ""
        elif len(sec_ids) == 1:
            conds = "AND sec_id = '{}'".format(sec_ids[0])
        else:
            conds = "AND sec_id IN {}".format(tuple(sec_ids))
        query = "SELECT sec_id, {} FROM [{}] WHERE date = '{}' {}".format(
            factor, factor, date, conds)
        try:
            df = proxy.query_as_dataframe(query)
        except Exception:
            Logger.error("Error occurred when reading {} at {}".format(
                factor, date))
            traceback.print_exc()
            raise ValueError
    return df.sort_values(by=['sec_id']).set_index(['sec_id'])
def load_secs_industry_gics_from_wind(index_code, date, level=1):
    """Load GICS industry data for an index's contents from Wind.

    @index_code (str): e.g. "H_GICSL1"
    @date (%Y-%m-%d): a single date
    @level (int): industry classification level, default 1
    :return: (dict of str): sec_id -> industry name; {} on empty universe
    """
    universe = get_index_contents(index_code, date)
    if not universe:
        Logger.error("Empty universe at {}!".format(date))
        return {}
    # BUG FIX: `level` was hard-coded to 1 here, silently ignoring the
    # parameter (cf. load_secs_industry_sw_from_wind which forwards it).
    output = get_secs_industry_gics(sec_ids=universe, level=level)
    return output
def load_secs_industry_sw_from_wind(index_code, date, level=1):
    """Load Shenwan industry data for an index's contents from Wind.

    @index_code (str): index code, one of "A" / "H"
    @date (%Y-%m-%d): a single date
    @level (int): industry level, default 1 (Shenwan level-1)
    :return: (dict of str): sec_id -> industry name; {} on empty universe
    """
    universe = get_index_contents(index_code, date, log=False)
    if not universe:
        Logger.error("Empty universe at {}!".format(date))
        return {}
    return get_secs_industry_sw(sec_ids=universe, date=date, level=level,
                                market=index_code)
def get_secs_index_std(index_std, sec_ids=None, trading_days=None, log=False):
    """Read one standardized index over a range of dates from MySQL.

    @index_std (str): a single index_std name (also the MySQL table)
    @sec_ids (list): stocks to query; empty/None means the whole A market
    @trading_days (["%Y-%m-%d"]): list of dates
    @log (Bool): whether to print logs
    :return: DataFrame with the queried rows; 'date' column cast to str
    """
    # avoid the mutable-default-argument pitfall (were `=[]`)
    sec_ids = [] if sec_ids is None else sec_ids
    trading_days = [] if trading_days is None else trading_days
    if log:
        Logger.info("Reading {} from {} to {}".format(index_std,
                                                      trading_days[0],
                                                      trading_days[-1]),
                    "green")
    # if index_std not in get_schema("index_std"):
    #     Logger.error("Unrecognized index_std: {}".format(index_std))
    #     raise ValueError
    if not isinstance(sec_ids, list):
        Logger.error("sec_ids must be list!")
        raise ValueError
    if not trading_days:
        Logger.error("Empty date")
        raise ValueError
    with MySQLProxy(log=log) as proxy:
        proxy.connect(USER, PASSWORD, "index_std")
        # note: single values use "=" (quoted); multiple values use a tuple
        if len(sec_ids) == 0:
            if len(trading_days) == 1:
                query = "SELECT * FROM {} WHERE date = '{}' ".format(
                    index_std, trading_days[0])
            else:
                query = "SELECT * FROM {} WHERE date in {}".format(
                    index_std, tuple(trading_days))
        elif len(sec_ids) == 1:
            if len(trading_days) == 1:
                query = "SELECT * FROM {} WHERE date = '{}' AND sec_id = '{}' ".format(
                    index_std, trading_days[0], sec_ids[0])
            else:
                query = "SELECT * FROM {} WHERE date in {} AND sec_id = '{}' ".format(
                    index_std, tuple(trading_days), sec_ids[0])
        else:
            if len(trading_days) == 1:
                query = "SELECT * FROM {} WHERE date = '{}' AND sec_id in {}".format(
                    index_std, trading_days[0], tuple(sec_ids))
            else:
                query = "SELECT * FROM {} WHERE date in {} AND sec_id in {}".format(
                    index_std, tuple(trading_days), tuple(sec_ids))
        try:
            df = proxy.query_as_dataframe(query)
        except Exception:
            # BUG FIX: the message referenced the undefined name `inde`,
            # raising NameError and masking the original exception.
            Logger.error("Error occurred when reading {} ".format(index_std))
            traceback.print_exc()
            raise ValueError
    df['date'] = df['date'].apply(lambda x: str(x))
    return df
def update_indicators(indicators=[], trading_days=[], sec_ids=[],
                      override=False, log=False):
    """Update several indicators over the given dates.

    @indicators (list): indicator names; empty means every known indicator
    @trading_days ([%Y-%m-%d]): list of dates
    @override (Bool): overwrite existing records, default False
    @log (Bool): whether to print logs
    """
    SCHEMA = get_schema('indicator')
    if not indicators:
        indicators = list(SCHEMA.keys())
    start, end = trading_days[0], trading_days[-1]
    # valid update days depend on the indicator type
    update_days_map = {
        "财报数据": set(get_report_days(start, end)),
        "时间序列": set(get_trading_days(start, end)),
    }
    for ind in indicators:
        if ind not in SCHEMA:
            Logger.error("Unrecognized indicator: {}".format(ind))
            continue
        # intersect the requested dates with the valid dates for this type
        valid_days = update_days_map[SCHEMA[ind]['type']]
        update_days = [t for t in trading_days if t in valid_days]
        if not update_days:
            Logger.warn("No valid days to update!")
        else:
            update_single_indicator(indicator=ind,
                                    trading_days=update_days,
                                    sec_ids=sec_ids,
                                    override=override,
                                    log=log)
def get_index_contents_from_sql(index_code, date="", log=False):
    """Read one index's constituent list on a single date from sqlite.

    @index_code (str): index code (table name)
    @date ('%Y-%m-%d'): a single date
    @log (Bool): whether to print logs
    :return (list): sec_id list; empty list when nothing was found
    """
    db_file = os.path.join(DB_INDEX_CONTENTS, '{}.db'.format(date[:4]))
    with SqliteProxy(log=log) as proxy:
        proxy.connect(db_file)
        query = "SELECT sec_id FROM [{}] WHERE date = '{}'".format(
            index_code, date)
        try:
            result = proxy.query_as_dataframe(query)
        except Exception:
            Logger.error("Error occurred when reading {} at {}".format(
                index_code, date))
            traceback.print_exc()
            raise ValueError
        if len(result) == 0:
            Logger.warn("Empty result when reading {} at {}".format(
                index_code, date))
            return []
        return result["sec_id"].tolist()
def get_secs_name(sec_ids=[]):
    """Get latest-date stock names as {sec_id: sec_name}, dispatching
    A-share and H-share ids automatically.

    @sec_id (list): stock list. NOTE(review): the original docstring
        claimed an empty list means "all A and H stocks on the latest
        date", but the code below returns {} for an empty list (every
        classifier bucket is empty) -- confirm the intended behavior.
    :return: {sec_id: sec_name}
    """
    classifier = classify_equity(sec_ids)
    output = {}
    # A-shares come from the sqlite store, H-shares from a csv file
    if classifier["A股"]:
        output.update(get_secs_name_from_sql(classifier["A股"], "A"))
    if classifier["港股"]:
        output.update(get_secs_name_from_csv(classifier["港股"], "H"))
    if classifier["其他"]:
        Logger.warn("Unrecognized sec_ids: {}".format(classifier["其他"]))
    return output
def get_index_weights(index_code, date=""):
    """Read index constituent weights on a single date.

    @index_code (str): currently supports
                       ['000016.SH', '000300.SH', '000905.SH']
    @date (%Y-%m-%d): a single date
    :return: {sec_id: weight}
    :raises ValueError: on an empty date or an unsupported index code
    """
    if not date:
        Logger.error("Empty date")
        raise ValueError
    if index_code not in ['000016.SH', '000300.SH', '000905.SH']:
        Logger.error("Invalid index code: {}".format(index_code))
        # BUG FIX: previously only logged and fell through to query a
        # non-existent table; fail fast like the other validations.
        raise ValueError
    dbpath = os.path.join(DB_INDEX_CONTENTS, '{}.db'.format(date[:4]))
    with SqliteProxy(log=False) as proxy:
        proxy.connect(dbpath)
        query = "SELECT sec_id, weight FROM [{}] WHERE date = '{}' ".format(
            index_code, date)
        df = proxy.query_as_dataframe(query)
        if len(df) == 0:
            Logger.warn("Empty result when reading {} at {}".format(
                index_code, date))
            output = {}
        else:
            output = {
                df.at[i, 'sec_id']: df.at[i, 'weight']
                for i in range(len(df))
            }
    return output
def get_index_contents(index_code, date="", approx=False, log=False):
    """Read an index's constituent list on a single date.

    @index_code (str): currently supports ['A', 'H', '000905.SH',
                       '000300.SH', '000016.SH', 'HSI.HI']
    @date ('%Y-%m-%d'): a single date
    @approx (Bool): snap to the nearest trading day on the left; used when
                    updating indicator report data on non-trading days
    @log (Bool): whether to print logs
    :return (list): sec_id list
    """
    if log:
        Logger.info(
            "Reading index contents of {} on {}".format(index_code, date),
            "green")
    if not date:
        Logger.error("Empty date")
        raise ValueError
    if approx:
        date = get_nearest_trading_day(date=date, direction='left',
                                       self_included=True)
    if index_code in IDXCONT_AS_SQL:
        return get_index_contents_from_sql(index_code, date, log=log)
    if index_code in IDXCONT_AS_CSV:
        return get_index_contents_from_csv(index_code)
    Logger.error("Unrecognized index code: {}".format(index_code))
    raise ValueError
def update_factors_return(factors_ret_to_update=[], trading_days=[],
                          group_num=10, log=True):
    """Update factor_return data for the given trading days.

    @factors_ret_to_update (<list>): factor list; empty means every
                                     factor_return in the schema
    @trading_days (<[%Y-%m-%d]>): list of dates
    @group_num (<int>): number of groups
    @log (<Bool>): whether to print logs
    """
    schema = get_schema('factor_return')
    targets = factors_ret_to_update or list(schema.keys())
    for factor_ret in targets:
        if factor_ret in schema:
            update_single_factor_return(factor_ret, trading_days,
                                        group_num, log)
        else:
            Logger.error("Unrecognized factor return: {}".format(factor_ret))
def update_index_contents_to_csv(index_code, trading_days, override):
    """Refresh the csv-stored contents of an index for the latest date.

    @index_code (str): index code
    @trading_days (['%Y-%m-%d']): dates; only the last one is used
    @override (Bool): unused here; kept for interface symmetry with
                      update_index_contents_to_sql
    :raises ValueError: on load failure or write failure
    """
    try:
        date = trading_days[-1]
        # BUG FIX: `loader` was used one line before it was assigned,
        # so this function always raised NameError -> ValueError.
        loader = LOADER_MAP[index_code]
        df = loader(index_code, date)
    except Exception:
        Logger.error("Error occurred when loading {}".format(index_code))
        raise ValueError
    try:
        path = os.path.join(DB_INDEX_CONTENTS, '{}.csv'.format(index_code))
        copy_to = os.path.join(DB_INDEX_CONTENTS,
                               '{}_backup.csv'.format(index_code))
        shutil.copy(path, copy_to)  # keep a backup in case of corruption
        df.to_csv(path, encoding="utf-8", index=False)
        Logger.info("{} on {} is updated successfully".format(
            index_code, date))
    except Exception:
        Logger.error("Error occurred when writing {}".format(index_code))
        traceback.print_exc()
        raise ValueError
    # csv files are different from sql, cannot use update_schema()
    # therefore update schema information explicitly
    try:
        now = datetime.now()
        schema = get_schema('index_contents')
        schema[index_code]["begin date"] = ""
        schema[index_code]["end date"] = now.strftime('%Y-%m-%d')
        schema[index_code]['last update'] = now.strftime('%Y-%m-%d %H:%M:%S')
        save_schema(schema, 'index_contents')
        Logger.info("schema updated: {}".format(index_code))
    except Exception:
        Logger.error(
            "Error occurred when updating schema of {}".format(index_code))
        traceback.print_exc()
        raise ValueError
def get_secs_industry(industry_code, sec_ids=[], date=""):
    """Get industry classification for stocks on a date as
    {sec_id: industry}; unknown ids are silently skipped.

    @industry_code (str): sub-database name, currently supports
                          ["A_SWL1", "H_SWL1", "H_GICSL1"]
    @sec_ids: (list) stock list
    @date: ("%Y-%m-%d") a single date
    :return: {sec_id: industry}
    """
    if not sec_ids:
        Logger.warn("Empty sec_ids when reading {} on {}!".format(
            industry_code, date))
        return {}
    if industry_code in INDUSTRY_AS_SQL:
        return get_secs_industry_from_sql(industry_code, sec_ids, date)
    if industry_code in INDUSTRY_AS_JSON:
        return get_secs_industry_from_json(industry_code, sec_ids)
    Logger.error("Unrecognized industry code: {}".format(industry_code))
    raise ValueError
def update_factors(factors=[], trading_days=[], override=False, log=False):
    """Update several factors on the given dates.

    @factors (<list>): factor names; empty means every factor in the
                       schema, ordered by its "level"
    @trading_days ([%Y-%m-%d]): list of dates
    @override (<Bool>): overwrite existing records, default False
    @log (<Bool>): whether to print logs
    """
    schema = get_schema("factor")
    targets = factors or sorted(schema, key=lambda f: schema[f]["level"])
    for fac in targets:
        if fac not in schema:
            Logger.error("Unrecognized factor: {}".format(fac))
            continue
        update_single_factor(factor=fac, trading_days=trading_days,
                             override=override, log=log)
    Logger.info("------------------------------------------")
def update_factor_return_schema(factor):
    """Refresh 'begin date', 'end date' and 'last update' for one
    factor_return entry in the schema.

    @factor (str): factor name
    """
    schema_path = os.path.join(DB_PATH_LIB['factor_return'], 'schema')
    schema = json2dict(schema_path)
    csv_path = os.path.join(DB_PATH_LIB['factor_return'],
                            "{}.csv".format(factor))
    dates = pd.read_csv(csv_path, encoding="utf-8")["date"]
    entry = schema[factor]
    entry['begin date'] = dates.min()
    entry['end date'] = dates.max()
    entry['last update'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    Logger.info("schema updated: {}".format(factor))
    dict2json(schema, schema_path, log=False)
def sqlize_db(db_name, subdb_list=[]):
    """SQL-ize a csv-based database into per-year sqlite files.

    @db_name (str): database name
    @subdb_list (list): sub-databases to convert; empty means all,
                        unknown names are dropped
    """
    known = get_schema(db_name)
    if not subdb_list:
        subdb_list = list(known.keys())
    else:
        subdb_list = [s for s in subdb_list if s in known]
    db_path = os.path.join(DB_PATH, db_name)
    with SqliteProxy(log=False) as proxy:
        for subdb in subdb_list:
            Logger.info("SQLing {}/{}".format(db_name, subdb), "green")
            subdb_path = os.path.join(db_path, subdb)
            trading_days = listdir_advanced(subdb_path, 'csv',
                                            strip_suffix=True)
            # one .db file per year
            for year, dates in classify_dates_by_year(trading_days).items():
                proxy.connect(os.path.join(db_path, '{}.db'.format(year)))
                if subdb not in proxy.list_tables:
                    # create the table with the db-specific template
                    DB_CREATOR_MAP[db_name](proxy, subdb)
                for date in dates:
                    df = pd.read_csv(
                        os.path.join(subdb_path, '{}.csv'.format(date)))
                    df['date'] = date
                    try:
                        proxy.write_from_dataframe(df, subdb)
                    except Exception:
                        Logger.error(
                            "Error occurred when sqlizing {} on {}.".format(
                                subdb, date))
                        traceback.print_exc()
def generate_table_template(db, table_name):
    """Return the column template used when creating `table_name` in `db`.

    Every entry is a 4-tuple (column name, SQL type, flag1, flag2); the
    two boolean flags are consumed by the table creator -- NOTE(review):
    confirm their exact meaning against DB_CREATOR_MAP implementations.

    @db (str): one of "indicator", "factor", "index_contents", "industry"
    @table_name (str): table to create
    :return: list of 4-tuples describing the columns
    :raises ValueError: on an unrecognized db or table name
    """
    base = [
        ("date", "CHAR(10)", False, False),
        ("sec_id", "TEXT", False, False),
    ]
    if db in ("indicator", "factor"):
        return base + [(table_name, "REAL", False, True)]
    if db == "index_contents":
        if table_name == "A_SWL1":
            return base + [("sec_name", "TEXT", False, False)]
        if table_name in ('000016.SH', '000300.SH', '000905.SH'):
            return base + [("sec_name", "TEXT", False, False),
                           ("weight", "REAL", False, False)]
        Logger.error("Unrecognized table name: {}".format(table_name))
        raise ValueError
    if db == "industry":
        return base + [("industry", "TEXT", False, True)]
    Logger.error("Unrecognized db name: {}".format(db))
    raise ValueError
def get_secs_industry_SWL1(sec_ids=[], date=""):
    """Get Shenwan level-1 industry for stocks on a date, dispatching
    A-shares and H-shares automatically.

    @sec_id: (list) stock list
    @date: (%Y-%m-%d) a single date
    :return: {sec_id: industry}
    """
    if not sec_ids:
        Logger.warn("Empty sec_ids when reading SWL1 on {}!".format(date))
        return {}
    buckets = classify_equity(sec_ids)
    result = {}
    if buckets['A股']:
        result.update(get_secs_industry(industry_code="A_SWL1",
                                        sec_ids=buckets['A股'],
                                        date=date))
    if buckets['港股']:
        result.update(get_secs_industry(industry_code="H_SWL1",
                                        sec_ids=buckets['港股'],
                                        date=date))
    return result
def calculate_factor(factor, date):
    """Compute a factor's values for one date from its indicators.

    :param factor (str): factor name
    :param date (%Y-%m-%d): date
    :return: DataFrame of processed factor values
    :raises ValueError: when the formula is missing or data is unusable
    """
    # The per-factor formula is resolved by naming convention.
    # BUG FIX: getattr without a default raises AttributeError instead of
    # returning None, so the `func is None` check below was dead code;
    # pass None explicitly to make the intended check live.
    func = getattr(formula, "calculate_raw_{}".format(factor), None)
    if func is None:
        Logger.error("Formula not implemented: {}".format(factor))
        raise ValueError
    context, df_today, missing_flag = load_context(factor, date)
    last_day = get_previous_existed_day_in_table(date, DB_FACTOR, factor)
    if missing_flag == 1:
        # too many missing values today: fall back to the previous record
        if last_day is None:  # no previous record to copy from
            Logger.error("当前日期数据缺失值太多,且之前没有可以复制的文件")
            raise ValueError
        else:
            Logger.warn("由于 {} 值缺失太多直接复制于 {}".format(date, last_day))
            try:
                df_last = get_secs_factor(factor, sec_ids=[], date=last_day,
                                          log=False)
            except Exception:
                traceback.print_exc()
                Logger.warn("无法提取 {} 上个记录日的数据".format(factor))
                raise ValueError
            value = df_today.merge(df_last, how="left", left_on='sec_id',
                                   right_index=True)
            return value
    else:
        data_raw = func(context)
        # post-processing: split out missing values, winsorize, standardize
        data_final = statistical_process(
            data=data_raw,
            var=factor,
            winsor_LB=WINSORIZE_LB,
            winsor_UB=WINSORIZE_UB)
        return data_final
def load_single_indicator_on_single_day_from_wind(indicator, sec_ids, date,
                                                  log=False):
    """Download one indicator for one date from Wind.

    @indicator (str): indicator name, a single indicator only
    @sec_ids (<list>): stock list; empty means the whole A market
    @date ("%Y-%m-%d"): a single date
    @log (Bool): whether to print logs
    :return: DataFrame with columns ['sec_id', <indicator>]
    """
    WindAPI.login(is_quiet=True)
    schema = SCHEMA[indicator]
    # BUG FIX: take a copy -- the original mutated schema['kwargs'] in
    # place below, leaking tradeDate/rptDate into the shared SCHEMA dict.
    options = dict(schema['kwargs'])
    if len(sec_ids) != 0:  # empty means the whole A market
        universe = sec_ids
    if schema["type"] == "时间序列":
        if len(sec_ids) == 0:
            universe = get_index_contents(index_code="A", date=date, log=log)
            if universe is None:
                Logger.error("Fail to fetch stock lists on {}".format(date))
                raise ValueError
        options["tradeDate"] = date.replace("-", "")
    elif schema["type"] == "财报数据":
        # approx=True handles report days falling on non-trading days
        if len(sec_ids) == 0:
            universe = get_index_contents(index_code="A", date=date,
                                          approx=True, log=log)
            if universe is None:
                Logger.error("Fail to fetch stock lists on: {}".format(date))
                raise ValueError
        options["rptDate"] = date.replace("-", "")
    else:
        Logger.error("Unrecognized indicator type: {}".format(schema["type"]))
        raise ValueError
    response = WDServer.wss(codes=",".join(universe),
                            fields=schema['field'],
                            options=options2str(options))
    WindAPI.test_error(response)
    # reshape the response: one column per returned field, indexed by code
    df = {field: response.Data[i] for i, field in enumerate(response.Fields)}
    df = pd.DataFrame(df, index=response.Codes).reset_index()
    df.columns = ["sec_id", indicator]
    return df
def update_index_std(index, cp=3, log=False):
    """Update index_std for every date present in `index` but missing
    from `index_std`; no trading_days argument is required.

    @index <str>: the index name (not the index_std name)
    @cp <int>: winsorize cutoff value
    @log <Bool>: unused here, kept for interface compatibility
    """
    all_days = get_unique_datelist_from_table("index", index)
    done_days = get_unique_datelist_from_table("index_std",
                                               "{}_std".format(index))
    update_days = sorted(set(all_days) - set(done_days))
    if not update_days:
        Logger.warn("All given dates has existed. No need to update!!")
        return
    output = process_ts_index(index, update_days, cp)
    if len(output) == 0:
        Logger.error("Fail to process {} on given dates".format(index))
    df2mysql(USER, PASSWORD, "index_std", index + '_std', output)
    # free large intermediates before returning
    del output, all_days, update_days
    gc.collect()
    Logger.info("Updated successfully!!")