Example #1
 def calc_factor_loading_(cls, start_date, end_date=None, month_end=True, save=False, **kwargs):
     """
     计算指定日期的样本个股的因子载荷, 并保存至因子数据库
     Parameters:
     --------
     :param start_date: datetime-like, str
         开始日期, 格式: YYYY-MM-DD or YYYYMMDD
     :param end_date: datetime-like, str
         结束日期, 如果为None, 则只计算start_date日期的因子载荷, 格式: YYYY-MM-DD or YYYYMMDD
     :param month_end: bool, 默认为True
         如果为True, 则只计算月末时点的因子载荷
     :param save: bool, 默认为True
         是否保存至因子数据库
     :param kwargs:
         'multi_proc': bool, True=采用多进程, False=采用单进程, 默认为False
     :return: dict
         因子载荷数据
     """
     # Get the trading-day series
     start_date = Utils.to_date(start_date)
     if end_date is not None:
         end_date = Utils.to_date(end_date)
         trading_days_series = Utils.get_trading_days(start=start_date, end=end_date)
     else:
         trading_days_series = Utils.get_trading_days(end=start_date, ndays=1)
     # Iterate over the trading days and compute the loadings of each component factor of ResVolatility
     if 'multi_proc' not in kwargs:
         kwargs['multi_proc'] = False
     for calc_date in trading_days_series:
         if month_end and (not Utils.is_month_end(calc_date)):
             continue
         # Compute the loadings of each component factor
         for com_factor in risk_ct.RESVOLATILITY_CT.component:
             factor = eval(com_factor + '()')
             factor.calc_factor_loading(start_date=calc_date, end_date=None, month_end=month_end, save=save, multi_proc=kwargs['multi_proc'])
         # Combine the components into the ResVolatility factor loading
         resvol_factor = pd.DataFrame()
         for com_factor in risk_ct.RESVOLATILITY_CT.component:
             factor_path = os.path.join(factor_ct.FACTOR_DB.db_path, eval('risk_ct.' + com_factor + '_CT')['db_file'])
             factor_loading = Utils.read_factor_loading(factor_path, Utils.datetimelike_to_str(calc_date, dash=False))
             factor_loading.drop(columns='date', inplace=True)
             factor_loading[com_factor] = Utils.normalize_data(Utils.clean_extreme_value(np.array(factor_loading['factorvalue']).reshape((len(factor_loading), 1))))
             factor_loading.drop(columns='factorvalue', inplace=True)
             if resvol_factor.empty:
                 resvol_factor = factor_loading
             else:
                 resvol_factor = pd.merge(left=resvol_factor, right=factor_loading, how='inner', on='id')
         resvol_factor.set_index('id', inplace=True)
         weight = pd.Series(risk_ct.RESVOLATILITY_CT.weight)
         resvol_factor = (resvol_factor * weight).sum(axis=1)
         resvol_factor.name = 'factorvalue'
         resvol_factor.index.name = 'id'
         resvol_factor = pd.DataFrame(resvol_factor)
         resvol_factor.reset_index(inplace=True)
         resvol_factor['date'] = Utils.get_trading_days(start=calc_date, ndays=2)[1]
         # Save the ResVolatility factor loading
         if save:
             Utils.factor_loading_persistent(cls._db_file, Utils.datetimelike_to_str(calc_date, dash=False), resvol_factor.to_dict('list'), ['date', 'id', 'factorvalue'])
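The composition step above winsorizes each component loading, standardizes it, and then takes a weighted sum across components. Below is a minimal, self-contained sketch of that pipeline in plain pandas; the clipping/z-scoring inside winsorize_zscore is an assumption about what Utils.clean_extreme_value and Utils.normalize_data do, and the component names and numbers are invented for illustration.

import pandas as pd

def winsorize_zscore(col, n_std=3.0):
    # clip to mean +/- n_std * std, then standardize (assumed behaviour of the Utils helpers)
    mu, sigma = col.mean(), col.std()
    clipped = col.clip(mu - n_std * sigma, mu + n_std * sigma)
    return (clipped - clipped.mean()) / clipped.std()

ids = ['SH600000', 'SH600004', 'SZ000001', 'SZ000002']
components = pd.DataFrame({'DSTD': [0.12, 0.30, -0.25, 0.80],
                           'CMRA': [1.10, 0.95, 1.40, 0.60]}, index=ids)
weight = pd.Series({'DSTD': 0.5, 'CMRA': 0.5})
resvol = (components.apply(winsorize_zscore) * weight).sum(axis=1).rename('factorvalue')
print(resvol)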
Example #2
    def calc_factor_loading(cls, start_date, end_date=None, month_end=True, save=False, **kwargs):
        """
        计算指定日期的样本个股的因子载荷, 并保存至因子数据库
        Parameters:
        --------
        :param start_date: datetime-like, str
            开始日期, 格式:YYYY-MM-DD or YYYYMMDD
        :param end_date: datetime-like, str
            结束日期, 如果为None, 则只计算start_date日期的因子载荷, 格式: YYYY-MM-DD or YYYYMMDD
        :param month_end: bool, 默认为True
            如果为True, 则只计算月末时点的因子载荷
        :param save: bool, 默认为True
            是否保存至因子数据库
        :param kwargs:
        :return: dict
            因子载荷数据
        """
        # Get the trading-day series
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date, end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date, ndays=1)
        # Iterate over the trading days and compute the NLSIZE factor loading
        dict_nlsize = None
        for calc_date in trading_days_series:
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            logging.info('[%s] Calc NLSIZE factor loading.' % Utils.datetimelike_to_str(calc_date))
            # Read the Size factor loading data
            lncap_data_path = os.path.join(factor_ct.FACTOR_DB.db_path, '{}_{}.csv'.format(risk_ct.SIZE_CT.db_file, Utils.datetimelike_to_str(calc_date, dash=False)))
            if not os.path.exists(lncap_data_path):
                logging.info("[%s] Size factor loading data does not exist." % Utils.datetimelike_to_str(calc_date))
                continue
            df_lncap = pd.read_csv(lncap_data_path, header=0)
            # Size factor values
            arr_size = np.array(df_lncap['factorvalue'])
            # Cube of the Size factor values
            arr_size_cube = arr_size ** 3
            # Orthogonalize the cube against the Size factor
            model = sm.OLS(arr_size_cube, arr_size)
            result = model.fit()
            # Winsorize and standardize the residuals (currently the raw residuals are used; see the commented-out lines below)
            n = len(result.resid)
            arr_resid = result.resid
            # arr_resid = result.resid.reshape(n, 1)
            # arr_resid_winsorized = Utils.clean_extreme_value(arr_resid)
            # arr_resid_standardized = Utils.normalize_data(arr_resid_winsorized)
            # Build and save the NLSIZE factor loading data
            dict_nlsize = dict({'date': df_lncap['date'].values, 'id': df_lncap['id'].values, 'factorvalue': arr_resid})
            if save:
                Utils.factor_loading_persistent(cls._db_file, Utils.datetimelike_to_str(calc_date, dash=False), dict_nlsize, ['date', 'id', 'factorvalue'])

        return dict_nlsize
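NLSIZE above is the residual from regressing the cube of the size factor on the size factor itself, which strips the linear size component out of size^3. A small stand-alone sketch of that orthogonalization with statsmodels (randomly generated stand-in data, not the project's Utils pipeline):

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
arr_size = rng.normal(size=500)        # stand-in for the LNCAP factor values
arr_size_cube = arr_size ** 3

# regress size^3 on size (no intercept, as in the snippet above) and keep the residual
result = sm.OLS(arr_size_cube, arr_size).fit()
nlsize_raw = result.resid

# the residual carries the non-linear size information and is (nearly) uncorrelated with size
print(round(np.corrcoef(nlsize_raw, arr_size)[0, 1], 4))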
Example #3
def _calc_alphafactor_loading(start_date,
                              end_date=None,
                              factor_name=None,
                              multi_proc=False,
                              test=False):
    """
    计算alpha因子因子载荷值(原始载荷值及去极值标准化后载荷值)
    Parameters:
    --------
    :param start_date: datetime-like, str
        开始日期, e.g: YYYY-MM-DD, YYYYMMDD
    :param end_date: datetime-like, str, 默认为None
        结束日期, e.g: YYYY-MM-DD, YYYYMMDD
    :param factor_name: str, 默认为None
        alpha因子名称, e.g: SmartMoney
        factor_namea为None时, 计算所有alpha因子载荷值; 不为None时, 计算指定alpha因子的载荷值
    :param multi_proc: bool, 默认为None
        是否进行并行计算
    :param test: bool, 默认为False
        是否是进行因子检验
    :return: 保存因子载荷值(原始载荷值及去极值标准化后的载荷值)
    """
    # param_cons = eval('alphafactor_ct.'+factor_name.upper() + '_CT')
    start_date = Utils.to_date(start_date)
    if end_date is None:
        trading_days_series = Utils.get_trading_days(end=start_date, ndays=1)
    else:
        end_date = Utils.to_date(end_date)
        trading_days_series = Utils.get_trading_days(start=start_date,
                                                     end=end_date)

    for calc_date in trading_days_series:
        if factor_name is None:
            for alphafactor_name in alphafactor_ct.ALPHA_FACTORS:
                CAlphaFactor = eval(alphafactor_name + '()')
                CAlphaFactor.calc_factor_loading(calc_date,
                                                 month_end=True,
                                                 save=True,
                                                 multi_proc=multi_proc)
        else:
            if (not test) and (factor_name
                               not in alphafactor_ct.ALPHA_FACTORS):
                raise ValueError("alpha因子类: %s, 不存在." % factor_name)
            CAlphaFactor = eval(factor_name + '()')
            CAlphaFactor.calc_factor_loading(calc_date,
                                             month_end=True,
                                             save=True,
                                             multi_proc=multi_proc)
Example #4
 def _optimize_periodmomentum_weight(cls, calc_date):
     """
     优化计算日内各时段动量因子载荷的权重
     Parameters:
     --------
     :param calc_date: datetime-like, str
         计算日期
     :return: pd.Series
     --------
         日内各时段动量因子载荷的优化权重向量
         0. date, 日期, datetimelike
         1. w0, 隔夜时段动量因子权重
         2. w1, 第1小时动量因子权重
         3. w2, 第2小时动量因子权重
         4. w3, 第3小时动量因子权重
         5. w4, 第4小时动量因子权重
     """
     calc_date = Utils.to_date(calc_date)
     # Read the IC time series of the intraday-period momentum factors over the past 60 months
     ic_filepath = os.path.join(SETTINGS.FACTOR_DB_PATH, alphafactor_ct.INTRADAYMOMENTUM_CT['factor_ic_file'])
     df_ic = pd.read_csv(ic_filepath, header=0, parse_dates=[0])
     df_ic = df_ic[df_ic['date'] <= calc_date].iloc[-60:]
     # Compute the mean vector and covariance matrix of the ICs
     df_ic.drop(columns='date', inplace=True)
     ic_mean = np.mat(df_ic.mean(axis=0)).reshape((df_ic.shape[1], 1))
     ic_cov = np.mat(df_ic.cov())
     # Compute the optimal weights of the intraday-period factors
     optimal_weights = ic_cov.I * ic_mean
     optimal_weights /= optimal_weights.sum()
     optimal_weights = np.array(optimal_weights).flatten().tolist()
     optimal_weights.insert(0, calc_date)
     optimal_weights = pd.Series(optimal_weights, index=['date', 'w0', 'w1', 'w2', 'w3', 'w4'])
     # Save the optimal weights
     weight_filepath = os.path.join(SETTINGS.FACTOR_DB_PATH, alphafactor_ct.INTRADAYMOMENTUM_CT['optimal_weight_file'])
     Utils.save_timeseries_data(optimal_weights, weight_filepath, save_type='a', columns=['date', 'w0', 'w1', 'w2', 'w3', 'w4'])
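The weight optimization above is the usual IC-IR maximization: the optimal weight vector is proportional to Sigma^-1 * mu, where mu and Sigma are the mean and covariance of the component ICs over the trailing window, normalized to sum to one. A small numpy sketch (np.linalg.solve instead of the matrix-inverse idiom; the 60x5 IC history is randomly generated for illustration):

import numpy as np

rng = np.random.default_rng(42)
ic_history = rng.normal(loc=0.02, scale=0.05, size=(60, 5))   # 60 months x 5 intraday-period ICs

ic_mean = ic_history.mean(axis=0)
ic_cov = np.cov(ic_history, rowvar=False)

optimal_weights = np.linalg.solve(ic_cov, ic_mean)   # Sigma^-1 * mu
optimal_weights /= optimal_weights.sum()
print(dict(zip(['w0', 'w1', 'w2', 'w3', 'w4'], np.round(optimal_weights, 4))))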
Example #5
    def _calc_factor_loading(cls, code, calc_date):
        """
        计算指定日期、指定个股EGRLF因子载荷
        Parameters:
        --------
        :param code: str
            个股代码, 如SH600000, 600000
        :param calc_date: datetime-like, str
            计算日期, 格式: YYYY-MM-DD
        :return: pd.Series
        --------
            个股的EGRLF因子载荷
            0. code
            1. egrlf
            如果计算失败, 返回None
        """
        code = Utils.code_to_symbol(code)
        calc_date = Utils.to_date(calc_date)
        # Read the stock's consensus expected earnings growth data
        earningsgrowth_data = Utils.get_consensus_data(
            calc_date, code, ConsensusType.PredictedEarningsGrowth)
        if earningsgrowth_data is None:
            # If no consensus earnings growth data exists, fall back to the realized net-profit growth over the past 3 years
            hist_growth_data = Utils.get_hist_growth_data(code, calc_date, 3)
            if hist_growth_data is None:
                return None
            if np.isnan(hist_growth_data['netprofit']):
                return None
            egrlf = hist_growth_data['netprofit']
        else:
            egrlf = earningsgrowth_data['growth_2y']

        return pd.Series([code, egrlf], index=['code', 'egrlf'])
Example #6
 def get_factor_weight(cls, date):
     """
     取得日内各时点动量因子的权重
     --------
     :param date: datetime-like or str
         日期
     :return: pd.Series
         各时点权重信息
     --------
         0. date: 日期
         1. w0: 第一个时点动量因子的权重
         2. w1: 第二个时点动量因子的权重
         3. w2: 第三个时点动量因子的权重
         4. w3: 第四个时点动量因子的权重
         5. w4: 第五个时点动量因子的权重
         读取不到数据,返回None
     """
     date = Utils.to_date(date)
     weight_file_path = os.path.join(
         factor_ct.FACTOR_DB.db_path,
         factor_ct.INTRADAYMOMENTUM_CT.optimal_weight_file)
     df_optimal_weight = pd.read_csv(weight_file_path,
                                     parse_dates=[0],
                                     header=0)
     df_optimal_weight.sort_values(by='date', inplace=True)
     df_optimal_weight = df_optimal_weight[df_optimal_weight.date <= date]
     if df_optimal_weight.shape[0] > 0:
         return df_optimal_weight.iloc[-1]
     else:
         return None
Example #7
 def _calc_factor_loading(cls, code, calc_date):
     """
     计算指定日期、指定个股RSTR因子载荷
     Parameters:
     --------
     :param code: str
         个股代码, 如SH600000, 600000
     :param calc_date: datetime-like, str
         计算日期, 格式: YYYY-MM-DD
     :return: pd.Series
     --------
         个股的RSTR因子载荷
         0. code
         1. rstr
         如果计算失败, 返回None
     """
     # Get the stock's adjusted daily quotes
     df_secu_quote = Utils.get_secu_daily_mkt(code, end=calc_date, ndays=risk_ct.RSTR_CT.trailing_start+1, fq=True)
     if df_secu_quote is None:
         return None
     if len(df_secu_quote) < risk_ct.RSTR_CT.half_life:
         return None
     # If the start date of the quote data is more than 2 x trailing_start days before the calculation date, return None
     s = Utils.to_date(calc_date) - datetime.timedelta(days=risk_ct.RSTR_CT.trailing_start*2)
     if Utils.to_date(df_secu_quote.iloc[0]['date']) < s:
         return None
     df_secu_quote = df_secu_quote.head(len(df_secu_quote) - risk_ct.RSTR_CT.trailing_end)
     df_secu_quote.reset_index(drop=True, inplace=True)
     # Compute the stock's daily log returns
     arr_secu_close = np.array(df_secu_quote.iloc[1:]['close'])
     arr_secu_preclose = np.array(df_secu_quote.shift(1).iloc[1:]['close'])
     arr_secu_daily_ret = np.log(arr_secu_close / arr_secu_preclose)
     # Compute the weights (exponentially weighted moving average)
     T = len(arr_secu_daily_ret)
     # time_spans = sorted(range(T), reverse=True)
     # alpha = 1 - np.exp(np.log(0.5)/risk_ct.RSTR_CT.half_life)
     # x = [1-alpha] * T
     # y = [alpha] * (T-1)
     # y.insert(0, 1)
     # weights = np.float_power(x, time_spans) * y
     weights = Algo.ewma_weight(T, risk_ct.RSTR_CT.half_life)
     # Compute RSTR
     rstr = np.sum(arr_secu_daily_ret * weights)
     return pd.Series([Utils.code_to_symbol(code), rstr], index=['code', 'rstr'])
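Algo.ewma_weight presumably builds the same exponentially decaying weights (oldest observation first, half-life decay, summing to one) as the commented-out construction above; RSTR is then the weighted sum of the daily log returns. A hedged stand-alone sketch of such a helper:

import numpy as np

def ewma_weight(n_obs, half_life):
    # exponential decay weights, oldest first, summing to one
    # (assumed to mirror Algo.ewma_weight / the commented-out lines above)
    alpha = 1.0 - np.exp(np.log(0.5) / half_life)
    exponents = np.arange(n_obs - 1, -1, -1)   # n_obs-1, ..., 1, 0
    head = np.full(n_obs, alpha)
    head[0] = 1.0                              # the oldest point absorbs the remaining mass
    return np.float_power(1.0 - alpha, exponents) * head

weights = ewma_weight(504, half_life=126)
print(round(weights.sum(), 6), weights[-3:])   # sums to 1.0; the most recent days get the largest weights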
Example #8
 def _calc_factor_loading(cls, code, calc_date):
     """
     计算指定日期、指定个股的价值因子,包含ep_ttm, bp_lr, ocf_ttm
     Parameters:
     --------
     :param code: str
         个股代码:如600000或SH600000
     :param calc_date: datetime-like or str
         计算日期,格式YYYY-MM-DD, YYYYMMDD
     :return: pd.Series
     --------
         价值类因子值
         0. ep_ttm: TTM净利润/总市值
         1. bp_lr: 净资产(最新财报)/总市值
         2. ocf_ttm: TTM经营性现金流/总市值
         若计算失败,返回None
     """
     code = Utils.code_to_symbol(code)
     calc_date = Utils.to_date(calc_date)
     # Read the TTM financial data
     ttm_fin_data = Utils.get_ttm_fin_basic_data(code, calc_date)
     if ttm_fin_data is None:
         return None
     # Read the latest financial report data
     report_date = Utils.get_fin_report_date(calc_date)
     fin_basic_data = Utils.get_fin_basic_data(code, report_date)
     if fin_basic_data is None:
         return None
     # Compute the total market cap
     mkt_daily = Utils.get_secu_daily_mkt(code,
                                          calc_date,
                                          fq=False,
                                          range_lookup=True)
     if mkt_daily.shape[0] == 0:
         return None
     cap_struct = Utils.get_cap_struct(code, calc_date)
     if cap_struct is None:
         return None
     total_cap = cap_struct.total - cap_struct.liquid_b - cap_struct.liquid_h
     total_mkt_cap = total_cap * mkt_daily.close
     # Compute the value factors
     ep_ttm = ttm_fin_data[
         'NetProfit'] * util_ct.FIN_DATA_AMOUNT_UNIT / total_mkt_cap
     ocf_ttm = ttm_fin_data[
         'NetOperateCashFlow'] * util_ct.FIN_DATA_AMOUNT_UNIT / total_mkt_cap
     bp_lr = fin_basic_data[
         'ShareHolderEquity'] * util_ct.FIN_DATA_AMOUNT_UNIT / total_mkt_cap
     return Series([round(ep_ttm, 6),
                    round(bp_lr, 6),
                    round(ocf_ttm, 6)],
                   index=['ep_ttm', 'bp_lr', 'ocf_ttm'])
Example #9
    def _calc_factor_loading(cls, code, calc_date):
        """
        计算指定日期、指定个股SGRO因子载荷
        Parameters:
        --------
        :param code: str
            个股代码, 如SH600000, 600000
        :param calc_date: datetime-like, str
            计算日期, 格式: YYYY-MM-DD
        :return: pd.Series
        --------
            个股的SGRO因子载荷
            0. code
            1. sgro
            如果计算失败, 返回None
        """
        code = Utils.code_to_symbol(code)
        calc_date = Utils.to_date(calc_date)
        # Read the key financial indicators of the past 5 years
        years = 5
        prevN_years_finbasicdata = _get_prevN_years_finbasicdata(
            calc_date, code, years)
        if prevN_years_finbasicdata is None:
            return None
        # Regress the adjusted main operating revenue on the year index t (OLS) and compute the slope beta
        arr_revenue = np.asarray([
            fin_basicdata['MainOperateRevenue']
            for fin_basicdata in prevN_years_finbasicdata
        ])
        if any(np.isnan(arr_revenue)):
            return None
        arr_t = np.arange(1, years + 1)
        arr_t = sm.add_constant(arr_t)
        model = sm.OLS(arr_revenue, arr_t)
        results = model.fit()
        beta = results.params[1]
        # Compute the average revenue
        avg_revenue = np.mean(arr_revenue)
        if abs(avg_revenue) < utils_con.TINY_ABS_VALUE:
            return None
        # sgro = beta / avg_revenue
        sgro = beta / avg_revenue

        return pd.Series([code, sgro], index=['code', 'sgro'])
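SGRO is the OLS slope of five annual revenue observations regressed on the year index, scaled by the average revenue so that companies of different sizes are comparable. A worked toy example (revenue numbers invented):

import numpy as np
import statsmodels.api as sm

arr_revenue = np.array([10.0, 11.5, 13.1, 15.2, 17.0])   # five years of revenue, oldest first
arr_t = sm.add_constant(np.arange(1, 6))                  # year index with an intercept

beta = sm.OLS(arr_revenue, arr_t).fit().params[1]         # yearly revenue trend (slope)
sgro = beta / np.mean(arr_revenue)                        # trend scaled by the average level
print(round(sgro, 4))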
Example #10
    def _get_factor_weight(cls, date=None):
        """
        取得日内各时点动量因子的权重
        --------
        :param date: datetime-like or str
            日期, 默认为None
            如果date=None, 返回全部权重数据
        :return: pd.Series, pd.DataFrame
            各时点权重信息
        --------
            0. date: 日期
            1. w0: 第一个时点动量因子的权重
            2. w1: 第二个时点动量因子的权重
            3. w2: 第三个时点动量因子的权重
            4. w3: 第四个时点动量因子的权重
            5. w4: 第五个时点动量因子的权重
            读取不到数据,返回None
        """

        weight_file_path = os.path.join(SETTINGS.FACTOR_DB_PATH, alphafactor_ct.INTRADAYMOMENTUM_CT.optimal_weight_file)
        if not os.path.isfile(weight_file_path):
            return None
        df_optimal_weight = pd.read_csv(weight_file_path, parse_dates=[0], header=0)
        df_optimal_weight.sort_values(by='date', inplace=True)

        if date is None:
            if df_optimal_weight.empty:
                return None
            else:
                return df_optimal_weight
        else:
            date = Utils.to_date(date)
            df_weight = df_optimal_weight[df_optimal_weight.date <= date]
            if df_weight.shape[0] > 0:
                return df_weight.iloc[-1]
            else:
                df_weight = df_optimal_weight[df_optimal_weight.date >= date]
                if df_weight.shape[0] > 0:
                    return df_weight.iloc[0]
                else:
                    return None
Example #11
def calc_suspension_info(date):
    """
    计算个股停牌信息
    Parameters:
    --------
    :param date: datetime-like, str
        计算日期, e.g: YYYY-MM-DD, YYYYMMDD
    :return:
    """
    # TODO: could be switched to fetching suspension info from the tushare.pro API

    date = Utils.to_date(date)
    df_stock_basics = Utils.get_stock_basics(date)
    df_stock_basics['trading_status'] = df_stock_basics.apply(lambda x: Utils.trading_status(x['symbol'], date), axis=1)
    df_stock_basics = df_stock_basics[df_stock_basics['trading_status'] == SecuTradingStatus.Suspend]
    df_stock_basics.drop(columns='trading_status', inplace=True)

    cfg = ConfigParser()
    cfg.read('config.ini')
    suspension_info_path = os.path.join(SETTINGS.FACTOR_DB_PATH, cfg.get('suspension_info', 'info_path'), '{}.csv'.format(Utils.datetimelike_to_str(date, dash=False)))
    df_stock_basics.to_csv(suspension_info_path, index=False, encoding='utf-8')
Example #12
 def _calc_factor_loading(cls, code, calc_date):
     """
     计算指定日期、指定个股的成长因子,包含npg_ttm, opg_ttm
     Parameters:
     --------
     :param code: str
         个股代码,如600000或SH600000
     :param calc_date: datetime-like or str
         计算日期,格式YYYY-MM-DD, YYYYMMDD
     :return: pd.Series
     --------
         成长类因子值
         0. id: 证券代码
         1. npg_ttm: 净利润增长率_TTM
         2. opg_ttm: 营业收入增长率_TTM
         若计算失败, 返回None
     """
     code = Utils.code_to_symbol(code)
     calc_date = Utils.to_date(calc_date)
     # Read the latest TTM financial data
     ttm_fin_data_latest = Utils.get_ttm_fin_basic_data(code, calc_date)
     if ttm_fin_data_latest is None:
         return None
     # Read the TTM financial data of the same period last year
     try:
         pre_date = datetime.datetime(calc_date.year-1, calc_date.month, calc_date.day)
     except ValueError:
         pre_date = calc_date - datetime.timedelta(days=366)
     ttm_fin_data_pre = Utils.get_ttm_fin_basic_data(code, pre_date)
     if ttm_fin_data_pre is None:
         return None
     # Compute the growth factor values
     if abs(ttm_fin_data_pre['NetProfit']) < 0.1:
         return None
     npg_ttm = (ttm_fin_data_latest['NetProfit'] - ttm_fin_data_pre['NetProfit']) / abs(ttm_fin_data_pre['NetProfit'])
     if abs(ttm_fin_data_pre['MainOperateRevenue']) < 0.1:
         return None
     opg_ttm = (ttm_fin_data_latest['MainOperateRevenue'] - ttm_fin_data_pre['MainOperateRevenue']) / abs(ttm_fin_data_pre['MainOperateRevenue'])
     return Series([code, round(npg_ttm, 4), round(opg_ttm, 4)], index=['id', 'npg_ttm', 'opg_ttm'])
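The growth rates above divide by the absolute value of the prior-year TTM figure so that a negative base does not flip the sign of the growth, and the 0.1 threshold simply guards against a near-zero denominator. A one-line worked example with invented numbers:

netprofit_latest, netprofit_prev = 120.0, -50.0
npg_ttm = (netprofit_latest - netprofit_prev) / abs(netprofit_prev)
print(round(npg_ttm, 4))   # 3.4: the sign stays meaningful even though the base year was a loss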
Example #13
 def _calc_factor_loading(cls, code, calc_date):
     """
     计算指定日期、指定个股的规模因子值
     Parameters:
     --------
     :param code: str
         个股代码,如600000、SH600000
     :param calc_date: datetime-like, str
         规模因子计算日期,格式YYYY-MM-DD或YYYYMMDD
     :return: pd.Series
     --------
         个股规模因子值,各个index对应的含义如下:
         0. LnTotalMktCap: 总市值对数
         1. LnLiquidMktCap: 流通市值对数
         若计算失败,返回None
     """
     # Get the latest unadjusted quote of the security up to the given date
     code = Utils.code_to_symbol(code)
     calc_date = Utils.to_date(calc_date)
     mkt_daily = Utils.get_secu_daily_mkt(code,
                                          calc_date,
                                          fq=False,
                                          range_lookup=True)
     if mkt_daily.shape[0] == 0:
         return None
     # Get the latest capital-structure data of the security up to the given date
     cap_struct = Utils.get_cap_struct(code, calc_date)
     if cap_struct is None:
         return None
     # Compute the security's size factors
     scale_factor = Series()
     total_cap = cap_struct.total - cap_struct.liquid_b - cap_struct.liquid_h
     scale_factor['LnTotalMktCap'] = math.log(total_cap * mkt_daily.close)
     scale_factor['LnLiquidMktCap'] = math.log(cap_struct.liquid_a *
                                               mkt_daily.close)
     return scale_factor
Example #14
    def calc_factor_loading(cls,
                            start_date,
                            end_date=None,
                            month_end=True,
                            save=False,
                            **kwargs):
        """
        计算指定日期的样本股的因子载荷,并保存至因子数据库
        Parameters:
        --------
        :param start_date: datetime-like or str
            开始日期,格式:YYYY-MM-DD or YYYYMMDD
        :param end_date: datetime-like or str
            结束日期,格式:YYYY-MM-DD or YYYYMMDD
        :param month_end: bool, 默认True
            如果为True,则只计算月末时点的因子载荷;否则每个交易日都计算
        :param save: bool, 默认False
            是否保存至因子数据库
        :return: 因子载荷,DataFrame
        --------
            因子载荷,DataFrame
            0. date: 日期
            1. id: 证券symbol
            2. LnTotalMktCap: 总市值对数值
            3. LnLiquidMktCap: 流通市值对数值
        """
        # Get the trading-day series and the stock basics table
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date,
                                                         end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date,
                                                         ndays=1)
        all_stock_basics = CDataHandler.DataApi.get_secu_basics()
        # Iterate over the trading days and compute the size factor values
        dict_scale = None
        for calc_date in trading_days_series:
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            dict_scale = {
                'date': [],
                'id': [],
                'LnTotalMktCap': [],
                'LnLiquidMktCap': []
            }
            # Iterate over the stocks and compute each stock's size factor values
            s = (calc_date - datetime.timedelta(days=90)).strftime('%Y%m%d')
            stock_basics = all_stock_basics[all_stock_basics.list_date < s]

            # Compute the size factors with a single process
            # for _, stock_info in stock_basics.iterrows():
            #     scale_data = cls._calc_factor_loading(stock_info.symbol, calc_date)
            #     if scale_data is not None:
            #         logging.info("[%s] %s's total mkt cap = %.0f, liquid mkt cap = %.0f" % (calc_date.strftime('%Y-%m-%d'), stock_info.symbol, scale_data.LnTotalMktCap, scale_data.LnLiquidMktCap))
            #         dict_scale['id'].append(Utils.code_to_symbol(stock_info.symbol))
            #         dict_scale['LnTotalMktCap'].append(round(scale_data.LnTotalMktCap, 4))
            #         dict_scale['LnLiquidMktCap'].append(round(scale_data.LnLiquidMktCap, 4))

            # Compute the size factors in parallel with multiple processes
            q = Manager().Queue()  # queue for inter-process communication; holds the size factor values computed by each process
            p = Pool(4)  # process pool, at most 4 concurrent processes
            for _, stock_info in stock_basics.iterrows():
                p.apply_async(cls._calc_factor_loading_proc,
                              args=(
                                  stock_info.symbol,
                                  calc_date,
                                  q,
                              ))
            p.close()
            p.join()
            while not q.empty():
                scale_data = q.get(True)
                dict_scale['id'].append(scale_data[0])
                dict_scale['LnTotalMktCap'].append(round(scale_data[1], 4))
                dict_scale['LnLiquidMktCap'].append(round(scale_data[2], 4))

            date_label = Utils.get_trading_days(start=calc_date, ndays=2)[1]
            dict_scale['date'] = [date_label] * len(dict_scale['id'])
            # Save the size factor loadings to the factor database
            if save:
                Utils.factor_loading_persistent(cls._db_file,
                                                calc_date.strftime('%Y%m%d'),
                                                dict_scale)
            # Sleep for 60 seconds
            logging.info('Suspending for 60s.')
            time.sleep(60)
        return dict_scale
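The per-date loop above fans the per-stock work out to a pool of four worker processes and collects the results through a Manager queue (a Manager queue, unlike a plain multiprocessing.Queue, can be passed to pool workers). A minimal, self-contained sketch of that pattern; the worker below is a stand-in that just pushes a fake (symbol, LnTotalMktCap, LnLiquidMktCap) tuple, whereas the real class uses cls._calc_factor_loading_proc:

import math
from multiprocessing import Manager, Pool

def _fake_calc_factor_loading_proc(symbol, calc_date, q):
    # stand-in worker: push a (symbol, LnTotalMktCap, LnLiquidMktCap) tuple onto the shared queue
    q.put((symbol, math.log(1e10), math.log(8e9)))

if __name__ == '__main__':
    q = Manager().Queue()
    p = Pool(4)
    for symbol in ['SH600000', 'SH600004', 'SZ000001']:
        p.apply_async(_fake_calc_factor_loading_proc, args=(symbol, '2018-12-28', q))
    p.close()
    p.join()
    results = []
    while not q.empty():
        results.append(q.get(True))
    print(results)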
Example #15
    def calc_factor_loading(cls,
                            start_date,
                            end_date=None,
                            month_end=True,
                            save=False,
                            **kwargs):
        """
        计算指定日期的样本个股的因子载荷,并保存至因子数据库
        Parameters
        --------
        :param start_date: datetime-like, str
            开始日期
        :param end_date: datetime-like, str,默认None
            结束日期,如果为None,则只计算start_date日期的因子载荷
        :param month_end: bool,默认True
            只计算月末时点的因子载荷
        :param save: 是否保存至因子数据库,默认为False
        :return: 因子载荷,DataFrame
        --------
            因子载荷,DataFrame
            0: ID, 证券ID,为索引
            1: factorvalue, 因子载荷
            如果end_date=None,返回start_date对应的因子载荷数据
            如果end_date!=None,返回最后一天的对应的因子载荷数据
            如果没有计算数据,返回None
        """
        # 0. Get the trading-day series
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date,
                                                         end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date,
                                                         ndays=1)
        # Get the sample stock basics
        all_stock_basics = CDataHandler.DataApi.get_secu_basics()
        # Iterate over the trading days and compute the SmartQ factor loading
        dict_factor = None
        for calc_date in trading_days_series:
            dict_factor = {'id': [], 'factorvalue': []}
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            # 1. Get the list of trading days used to read minute quotes (past 30 trading days, descending)
            # trading_days = _get_trading_days(calc_date, 30)
            # trading_days = Utils.get_trading_days(end=calc_date, ndays=30, ascending=False)
            # 2. Get the sample stock basics
            # stock_basics = ts.get_stock_basics()
            s = (calc_date - datetime.timedelta(days=90)).strftime('%Y%m%d')
            stock_basics = all_stock_basics[all_stock_basics.list_date < s]
            # 3. Iterate over the sample stock codes and compute the SmartQ factor loading values
            dict_factor = {'id': [], 'factorvalue': []}

            # Compute with a single process
            # for _, stock_info in stock_basics.iterrows():
            #     # code = '%s%s' % ('SH' if code[:2] == '60' else 'SZ', code)
            #     factor_loading = cls._calc_factor_loading(stock_info.symbol, calc_date)
            #     print("[%s]Calculating %s's SmartMoney factor loading = %.4f." % (calc_date.strftime('%Y-%m-%d'), stock_info.symbol, -1.0 if factor_loading is None else factor_loading))
            #     if factor_loading is not None:
            #         # df_factor.ix[code, 'factorvalue'] = factor_loading
            #         dict_factor['id'].append(Utils.code_to_symbol(stock_info.symbol))
            #         dict_factor['factorvalue'].append(factor_loading)

            # Compute the SmartQ factor loadings in parallel with multiple processes
            q = Manager().Queue()  # queue for inter-process communication; holds the factor loading computed by each process
            p = Pool(4)  # process pool, at most 4 concurrent processes
            for _, stock_info in stock_basics.iterrows():
                p.apply_async(cls._calc_factor_loading_proc,
                              args=(
                                  stock_info.symbol,
                                  calc_date,
                                  q,
                              ))
            p.close()
            p.join()
            while not q.empty():
                smart_q = q.get(True)
                dict_factor['id'].append(smart_q[0])
                dict_factor['factorvalue'].append(smart_q[1])

            date_label = Utils.get_trading_days(calc_date, ndays=2)[1]
            dict_factor['date'] = [date_label] * len(dict_factor['id'])
            # 4. Save the factor loadings to the factor database
            if save:
                # db = shelve.open(cls._db_file, flag='c', protocol=None, writeback=False)
                # try:
                #     db[calc_date.strftime('%Y%m%d')] = df_factor
                # finally:
                #     db.close()
                Utils.factor_loading_persistent(cls._db_file,
                                                calc_date.strftime('%Y%m%d'),
                                                dict_factor)
            # Sleep for 360 seconds
            logging.info('Suspending for 360s.')
            time.sleep(360)
        return dict_factor
Example #16
    def calc_factor_loading_(cls,
                             start_date,
                             end_date=None,
                             month_end=True,
                             save=False,
                             **kwargs):
        """
        计算指定日期的样本个股的因子载荷, 并保存至因子数据库
        Parameters:
        --------
        :param start_date: datetime-like, str
            开始日期, 格式: YYYY-MM-DD or YYYYMMDD
        :param end_date: datetime-like, str
            结束日期, 如果为None, 则只计算start_date日期的因子载荷, 格式: YYYY-MM-DD or YYYYMMDD
        :param month_end: bool, 默认为True
            如果为True, 则只计算月末时点的因子载荷
        :param save: bool, 默认为True
            是否保存至因子数据库
        :param kwargs:
            'multi_proc': bool, True=采用多进程, False=采用单进程, 默认为False
        :return: dict
            因子载荷数据
        """
        # Get the trading-day series
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date,
                                                         end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date,
                                                         ndays=1)
        # Iterate over the trading days and compute the loadings of each component factor of the Growth factor
        if 'multi_proc' not in kwargs:
            kwargs['multi_proc'] = False
        for calc_date in trading_days_series:
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            # Compute the loadings of each component factor
            for com_factor in risk_ct.GROWTH_CT.component:
                factor = eval(com_factor + '()')
                factor.calc_factor_loading(start_date=calc_date,
                                           end_date=None,
                                           month_end=month_end,
                                           save=save,
                                           multi_proc=kwargs['multi_proc'])
            # Combine the components into the Growth factor loading
            growth_factor = pd.DataFrame()
            df_industry_classify = Utils.get_industry_classify()  # industry classification of each stock
            for com_factor in risk_ct.GROWTH_CT.component:
                factor_path = os.path.join(
                    factor_ct.FACTOR_DB.db_path,
                    eval('risk_ct.' + com_factor + '_CT')['db_file'])
                factor_loading = Utils.read_factor_loading(
                    factor_path,
                    Utils.datetimelike_to_str(calc_date, dash=False))
                factor_loading.drop(columns='date', inplace=True)
                # factor_loading[com_factor] = Utils.normalize_data(Utils.clean_extreme_value(np.array(factor_loading['factorvalue']).reshape((len(factor_loading), 1))))
                # factor_loading.drop(columns='factorvalue', inplace=True)
                factor_loading.rename(columns={'factorvalue': com_factor},
                                      inplace=True)
                # Add the industry classification data
                factor_loading = pd.merge(
                    left=factor_loading,
                    right=df_industry_classify[['id', 'ind_code']],
                    how='inner',
                    on='id')
                # Extract the factor loadings that contain missing values
                missingdata_factor = factor_loading[
                    factor_loading[com_factor].isna()]
                # Drop the missing values from factor_loading
                factor_loading.dropna(axis='index', how='any', inplace=True)
                # Winsorize and standardize factor_loading
                factor_loading = Utils.normalize_data(factor_loading,
                                                      id='id',
                                                      columns=com_factor,
                                                      treat_outlier=True,
                                                      weight='cap',
                                                      calc_date=calc_date)
                # Replace the missing values in missingdata_factor with the industry mean
                ind_codes = set(missingdata_factor['ind_code'])
                ind_mean_factor = {}
                for ind_code in ind_codes:
                    ind_mean_factor[ind_code] = factor_loading[
                        factor_loading['ind_code'] ==
                        ind_code][com_factor].mean()
                for idx, missingdata in missingdata_factor.iterrows():
                    missingdata_factor.loc[idx, com_factor] = ind_mean_factor[
                        missingdata['ind_code']]
                # Concatenate missingdata_factor and factor_loading
                factor_loading = pd.concat(
                    [factor_loading, missingdata_factor])
                # Drop the ind_code column
                factor_loading.drop(columns='ind_code', inplace=True)

                if growth_factor.empty:
                    growth_factor = factor_loading
                else:
                    growth_factor = pd.merge(left=growth_factor,
                                             right=factor_loading,
                                             how='inner',
                                             on='id')

            # # Read the industry classification data and add it to growth_factor
            # df_industry_classify = Utils.get_industry_classify()
            # growth_factor = pd.merge(left=growth_factor, right=df_industry_classify[['id', 'ind_code']])
            # # Extract the factor loadings that contain missing values
            # missingdata_factor = growth_factor.loc[[ind for ind, data in growth_factor.iterrows() if data.hasnans]]
            # # Drop the missing values from growth_factor
            # growth_factor.dropna(axis='index', how='any', inplace=True)
            # # Winsorize and standardize growth_factor
            # growth_factor = Utils.normalize_data(growth_factor, id='id', columns=risk_ct.GROWTH_CT.component, treat_outlier=True, weight='cap', calc_date=calc_date)
            # # Replace the missing values in missingdata_factor with the industry mean
            # ind_codes = set(missingdata_factor['ind_code'])
            # ind_mean_factor = {}
            # for ind_code in ind_codes:
            #     ind_mean_factor[ind_code] = growth_factor[growth_factor['ind_code'] == ind_code].mean()
            # missingdata_label = {ind: missingdata_factor.columns[missingdata.isna()].tolist() for ind, missingdata in missingdata_factor.iterrows()}
            # for ind, cols in missingdata_label.items():
            #     missingdata_factor.loc[ind, cols] = ind_mean_factor[missingdata_factor.loc[ind, 'ind_code']][cols]
            # # Concatenate missingdata_factor and growth_factor
            # growth_factor = pd.concat([growth_factor, missingdata_factor])
            # # Drop the ind_code column
            # growth_factor.drop(columns='ind_code', inplace=True)

            # Combine into the Growth factor
            growth_factor.set_index('id', inplace=True)
            weight = pd.Series(risk_ct.GROWTH_CT.weight)
            growth_factor = (growth_factor * weight).sum(axis=1)
            growth_factor.name = 'factorvalue'
            growth_factor.index.name = 'id'
            growth_factor = pd.DataFrame(growth_factor)
            growth_factor.reset_index(inplace=True)
            growth_factor['date'] = Utils.get_trading_days(start=calc_date,
                                                           ndays=2)[1]
            # Save the Growth factor loading
            if save:
                Utils.factor_loading_persistent(
                    cls._db_file,
                    Utils.datetimelike_to_str(calc_date, dash=False),
                    growth_factor.to_dict('list'),
                    ['date', 'id', 'factorvalue'])
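The missing-value handling above fills a security's missing component loading with the mean loading of the securities in the same industry. With plain pandas the same idea can be written as a groupby-transform; a sketch with toy data (not the project's Utils.normalize_data pipeline):

import numpy as np
import pandas as pd

factor_loading = pd.DataFrame({
    'id': ['SH600000', 'SH600004', 'SZ000001', 'SZ000002'],
    'SGRO': [0.12, np.nan, -0.30, 0.05],
    'ind_code': ['BANK', 'BANK', 'BANK', 'FOOD'],
})
# fill each NaN with the mean of the non-missing loadings in the same industry
factor_loading['SGRO'] = factor_loading.groupby('ind_code')['SGRO'].transform(
    lambda s: s.fillna(s.mean()))
print(factor_loading)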
Example #17
def _calc_mvpfp_summary(factor_name, calc_date):
    """
    计算最小波动纯因子组合的汇总绩效数据
    Parameters:
    --------
    :param factor_name: str
        alpha因子名称, e.g: SmartMoney
    :param calc_date: datetime-like, str
        计算日期, e.g: YYYY-MM-DD, YYYYMMDD
    :return:
        计算汇总绩效数据, 并保存
    """
    calc_date = Utils.to_date(calc_date)
    dailyperformance_filepath = os.path.join(
        SETTINGS.FACTOR_DB_PATH,
        eval('alphafactor_ct.' + factor_name.upper() + '_CT')['db_file'],
        'performance/performance_daily.csv')
    df_daily_performance = pd.read_csv(dailyperformance_filepath,
                                       parse_dates=[0],
                                       header=0)
    df_daily_performance = df_daily_performance[
        df_daily_performance['date'] <= calc_date]

    monthlyperformance_filepath = os.path.join(
        SETTINGS.FACTOR_DB_PATH,
        eval('alphafactor_ct.' + factor_name.upper() + '_CT')['db_file'],
        'performance/performance_monthly.csv')
    df_monthly_performance = pd.read_csv(monthlyperformance_filepath,
                                         parse_dates=[0],
                                         header=0)
    df_monthly_performance = df_monthly_performance[
        df_monthly_performance['date'] <= calc_date]
    if len(df_monthly_performance) < 12:
        logging.info("alpha因子'%s'的历史月度绩效数据长度小于12个月, 不计算汇总绩效数据." % factor_name)
        return
    summary_performance = pd.Series(
        index=alphamodel_ct.FACTOR_PERFORMANCE_HEADER['summary_performance'])
    for k in alphamodel_ct.SUMMARY_PERFORMANCE_MONTH_LENGTH:
        if k == 'total':
            daily_performance = df_daily_performance
            monthly_performance = df_monthly_performance
            summary_performance['type'] = k
        else:
            if not isinstance(k, int):
                raise TypeError("计算因子汇总绩效的时间区间类型除了'total'外, 应该为整型.")
            if len(df_monthly_performance) >= k:
                monthly_performance = df_monthly_performance.iloc[-k:]
            else:
                logging.info(
                    "The monthly performance history of alpha factor '%s' is shorter than %d months; the summary for this window is skipped." %
                    (factor_name, k))
                continue
            daily_performance = df_daily_performance[
                df_daily_performance['date'] >= monthly_performance.iloc[0]
                ['date']]
            summary_performance['type'] = str(k) + 'm'

        summary_performance['date'] = daily_performance.iloc[-1]['date']
        summary_performance['total_ret'] = daily_performance.iloc[-1][
            'nav'] / daily_performance.iloc[0]['nav'] - 1.0
        # Annualize over the number of months actually covered ('total' uses the full history length)
        n_months = len(monthly_performance)
        summary_performance['annual_ret'] = math.pow(
            summary_performance['total_ret'] + 1, 12 / n_months) - 1.0
        summary_performance['volatility'] = np.std(
            daily_performance['daily_ret']) * math.sqrt(250)
        summary_performance['monthly_winrate'] = len(
            monthly_performance[monthly_performance['monthly_ret'] > 0]) / n_months
        summary_performance['IR'] = summary_performance[
            'annual_ret'] / summary_performance['volatility']

        fmax_drawdown = 0.0
        for m in range(1, len(daily_performance)):
            fdrawdown = daily_performance.iloc[m]['nav'] / max(
                daily_performance.iloc[:m]['nav']) - 1.0
            if fdrawdown < fmax_drawdown:
                fmax_drawdown = fdrawdown
        summary_performance['max_drawdown'] = fmax_drawdown

        _save_mvpfp_performance(summary_performance, factor_name, 'summary',
                                'a')
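The running-peak loop at the end computes the maximum drawdown of the NAV series. The same quantity can be obtained in a vectorized way with numpy; a sketch on a toy NAV series:

import numpy as np

nav = np.array([1.00, 1.05, 1.02, 1.10, 0.95, 1.00, 1.20])
running_peak = np.maximum.accumulate(nav)
drawdowns = nav / running_peak - 1.0
print(round(drawdowns.min(), 4))   # most negative drawdown, here 0.95 / 1.10 - 1 ~ -0.1364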
Example #18
    def _calc_synthetic_factor_loading(cls,
                                       start_date,
                                       end_date=None,
                                       month_end=True,
                                       save=False,
                                       **kwargs):
        """
        计算指定日期的样本个股的合成因子的载荷,并保存至因子数据库
        Parameters
        --------
        :param start_date: datetime-like, str
            开始日期
        :param end_date: datetime-like, str,默认None
            结束日期,如果为None,则只计算start_date日期的因子载荷
        :param month_end: bool,默认True
            只计算月末时点的因子载荷,该参数只在end_date不为None时有效,并且不论end_date是否为None,都会计算第一天的因子载荷
        :param save: 是否保存至因子数据库,默认为False
        :param kwargs:
            'multi_proc': bool, True=采用多进程, False=采用单进程, 默认为False
            'com_factors': list, 成分因子的类实例list
        :return: 因子载荷,DataFrame
        --------
            因子载荷,DataFrame
            0: ID, 证券ID,为索引
            1: factorvalue, 因子载荷
        """
        # Get the trading-day series
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date,
                                                         end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date,
                                                         ndays=1)
        # Iterate over the trading days and compute the loadings of each component factor of the synthetic factor
        if 'multi_proc' not in kwargs:
            kwargs['multi_proc'] = False
        for calc_date in trading_days_series:
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            # Compute the loadings of each component factor
            # for com_factor in eval('risk_ct.' + cls.__name__.upper() + '_CT')['component']:
            #     factor = eval(com_factor + '()')
            #     factor.calc_factor_loading(start_date=calc_date, end_date=None, month_end=month_end, save=save, multi_proc=kwargs['multi_proc'])
            for com_factor in kwargs['com_factors']:
                com_factor.calc_factor_loading(start_date=calc_date,
                                               end_date=None,
                                               month_end=month_end,
                                               save=save,
                                               multi_proc=kwargs['multi_proc'])
            # Compute the synthetic factor
            synthetic_factor = pd.DataFrame()
            df_industry_classify = Utils.get_industry_classify()  # industry classification of each stock
            for com_factor in eval('risk_ct.' + cls.__name__.upper() +
                                   '_CT')['component']:
                factor_path = os.path.join(
                    factor_ct.FACTOR_DB.db_path,
                    eval('risk_ct.' + com_factor + '_CT')['db_file'])
                factor_loading = Utils.read_factor_loading(
                    factor_path,
                    Utils.datetimelike_to_str(calc_date, dash=False))
                factor_loading.drop(columns='date', inplace=True)
                factor_loading.rename(columns={'factorvalue': com_factor},
                                      inplace=True)
                # Add the industry classification data
                factor_loading = pd.merge(
                    left=factor_loading,
                    right=df_industry_classify[['id', 'ind_code']],
                    how='inner',
                    on='id')
                # Extract the factor loadings that contain missing values
                missingdata_factor = factor_loading[
                    factor_loading[com_factor].isna()]
                # Drop the missing values from factor_loading
                factor_loading.dropna(axis='index', how='any', inplace=True)
                # Winsorize and standardize factor_loading
                factor_loading = Utils.normalize_data(factor_loading,
                                                      id='id',
                                                      columns=com_factor,
                                                      treat_outlier=True,
                                                      weight='cap',
                                                      calc_date=calc_date)
                # Replace the missing values in missingdata_factor with the industry mean
                ind_codes = set(missingdata_factor['ind_code'])
                ind_mean_factor = {}
                for ind_code in ind_codes:
                    ind_mean_factor[ind_code] = factor_loading[
                        factor_loading['ind_code'] ==
                        ind_code][com_factor].mean()
                for idx, missingdata in missingdata_factor.iterrows():
                    missingdata_factor.loc[idx, com_factor] = ind_mean_factor[
                        missingdata['ind_code']]
                # Concatenate missingdata_factor and factor_loading
                factor_loading = pd.concat(
                    [factor_loading, missingdata_factor])
                # Drop the ind_code column
                factor_loading.drop(columns='ind_code', inplace=True)
                # Merge the component factor
                if synthetic_factor.empty:
                    synthetic_factor = factor_loading
                else:
                    synthetic_factor = pd.merge(left=synthetic_factor,
                                                right=factor_loading,
                                                how='inner',
                                                on='id')

            # Combine into the synthetic factor
            synthetic_factor.set_index('id', inplace=True)
            weight = pd.Series(
                eval('risk_ct.' + cls.__name__.upper() + '_CT')['weight'])
            synthetic_factor = (synthetic_factor * weight).sum(axis=1)
            synthetic_factor.name = 'factorvalue'
            synthetic_factor.index.name = 'id'
            synthetic_factor = pd.DataFrame(synthetic_factor)
            synthetic_factor.reset_index(inplace=True)
            synthetic_factor['date'] = Utils.get_trading_days(start=calc_date,
                                                              ndays=2)[1]
            # Save the synthetic factor loading
            if save:
                Utils.factor_loading_persistent(
                    cls._db_file,
                    Utils.datetimelike_to_str(calc_date, dash=False),
                    synthetic_factor.to_dict('list'),
                    ['date', 'id', 'factorvalue'])
Example #19
    def calc_factor_loading(cls,
                            start_date,
                            end_date=None,
                            month_end=True,
                            save=False,
                            **kwargs):
        """
        计算指定日期的样本个股的因子载荷, 并保存至因子数据库
        Parameters:
        --------
        :param start_date: datetime-like or str
            开始日期, 格式: YYYY-MM-DD or YYYYMMDD
        :param end_date: datetime-like, str
            结束日期, 如果为None, 则只计算start_date日期的因子载荷, 格式:YYYY-MM-DD or YYYYMMDD
        :param month_end: bool, 默认True
            如果为True, 则只计算月末时点的因子载荷
        :param save: bool, 默认True
            是否保存至因子数据库
        :param kwargs:
        :return: dict
            因子载荷
        --------
        """
        # Get the trading-day series and the stock basics table
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date,
                                                         end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date,
                                                         ndays=1)
        all_stock_basics = CDataHandler.DataApi.get_secu_basics()
        # Iterate over the trading days and compute the CYQ (chip distribution) factor loadings
        dict_cyq = {}
        for calc_date in trading_days_series:
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            logging.info('[%s] Calc CYQ factor loading.' %
                         Utils.datetimelike_to_str(calc_date))
            # Iterate over the stocks and compute each stock's chip-distribution factor value
            s = (calc_date - datetime.timedelta(days=180)).strftime('%Y%m%d')
            stock_basics = all_stock_basics[all_stock_basics.list_date < s]

            secu_cyq_path = Path(
                factor_ct.FACTOR_DB.db_path, factor_ct.CYQ_CT.db_file,
                'secu_cyq/%s' % calc_date.strftime('%Y-%m-%d'))
            if not secu_cyq_path.exists():
                secu_cyq_path.mkdir()
            ids = []
            rps = []

            # Compute the chip distribution data and the relative price position (= (price - average cost) / average cost) with a single process
            # for _, stock_info in stock_basics.iterrows():
            #     logging.info("[%s] Calc %s's cyq data." % (calc_date.strftime('%Y-%m-%d'), stock_info.symbol))
            #     secu_cyq = cls._calc_factor_loading(stock_info.symbol, calc_date)
            #     if secu_cyq is not None:
            #         secu_code, secu_close, cyq_data = secu_cyq
            #         # Save the stock's chip distribution data
            #         cyq_data.to_csv(Path(secu_cyq_path, '%s.csv' % secu_code), header=True)
            #         # Compute the relative position of the current price
            #         avg_cyq = np.sum(np.array(cyq_data.index) * np.array(cyq_data.values))
            #         relative_position = round((secu_close - avg_cyq) / avg_cyq, 4)
            #         ids.append(secu_code)
            #         rps.append(relative_position)

            # Compute the chip distribution data and the relative price position (= (price - average cost) / average cost) in parallel with multiple processes
            q = Manager().Queue()  # queue for inter-process communication; holds the factor loadings computed by each process
            p = Pool(4)  # process pool, at most 4 concurrent processes
            for _, stock_info in stock_basics.iterrows():
                p.apply_async(cls._calc_factor_loading_proc,
                              args=(
                                  stock_info.symbol,
                                  calc_date,
                                  q,
                              ))
            p.close()
            p.join()
            while not q.empty():
                secu_cyq = q.get(True)
                secu_code, secu_close, cyq_data = secu_cyq
                # Save the stock's chip distribution data
                cyq_data.to_csv(Path(secu_cyq_path, '%s.csv' % secu_code),
                                header=True)
                # Compute the relative position of the current price
                avg_cyq = np.sum(
                    np.array(cyq_data.index) * np.array(cyq_data.values))
                relative_position = round((secu_close - avg_cyq) / avg_cyq, 4)
                ids.append(secu_code)
                rps.append(relative_position)

            date_label = Utils.get_trading_days(calc_date, ndays=2)[1]
            dict_cyq = {
                'date': [date_label] * len(ids),
                'id': ids,
                'factorvalue': rps
            }
            if save:
                cyq_data_path = os.path.join(factor_ct.FACTOR_DB.db_path,
                                             factor_ct.CYQ_CT.db_file,
                                             factor_ct.CYQ_CT.CYQ_rp_file)
                Utils.factor_loading_persistent(
                    cyq_data_path,
                    Utils.datetimelike_to_str(calc_date, dash=False), dict_cyq,
                    ['date', 'id', 'factorvalue'])
            # Sleep for 100 seconds
            logging.info('Suspending for 100s.')
            time.sleep(100)
        return dict_cyq
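Each security's CYQ factor value here is the relative position of the latest close with respect to the average cost implied by the chip distribution: (close - average cost) / average cost. A tiny worked example with an invented price/weight distribution:

import numpy as np
import pandas as pd

# toy chip distribution: index = price levels, values = share of chips held at each price
cyq_data = pd.Series([0.2, 0.5, 0.3], index=[9.0, 10.0, 11.0])
secu_close = 10.8

avg_cyq = np.sum(np.array(cyq_data.index) * np.array(cyq_data.values))   # weighted average cost
relative_position = round((secu_close - avg_cyq) / avg_cyq, 4)
print(round(avg_cyq, 2), relative_position)   # 10.1 and ~0.0693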
Example #20
    def calc_factor_loading1(cls,
                             start_date,
                             end_date=None,
                             month_end=True,
                             save=False,
                             **kwargs):
        """
        计算指定日期的样本个股的因子载荷, 并保存至因子数据库
        Parameters:
        --------
        :param start_date: datetime-like or str
            开始日期, 格式: YYYY-MM-DD or YYYYMMDD
        :param end_date: datetime-like, str
            结束日期, 如果为None, 则只计算start_date日期的因子载荷, 格式:YYYY-MM-DD or YYYYMMDD
        :param month_end: bool, 默认True
            如果为True, 则只计算月末时点的因子载荷
        :param save: bool, 默认True
            是否保存至因子数据库
        :param kwargs:
        :return: dict
            因子载荷
        --------
        """
        # Get the trading-day series and the stock basics table
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date,
                                                         end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date,
                                                         ndays=1)
        all_stock_basics = CDataHandler.DataApi.get_secu_basics()
        # Iterate over the trading days and compute the CYQ (chip distribution) factor loadings
        dict_cyq = None
        for calc_date in trading_days_series:
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            logging.info('[%s] Calc CYQ factor loading.' %
                         Utils.datetimelike_to_str(calc_date))
            # Iterate over the stocks and compute each stock's chip-distribution factor value
            df_proxies = DataFrame()
            s = (calc_date - datetime.timedelta(days=365)).strftime('%Y%m%d')
            stock_basics = all_stock_basics[all_stock_basics.list_date < s]

            trading_day = Utils.get_trading_days(calc_date, ndays=2)[1]
            # Compute the chip-distribution proxy variables with a single process
            # for _, stock_info in stock_basics.iterrows():
            #     cyq_proxies = cls._calc_factor_loading(stock_info.symbol, calc_date)
            #     if cyq_proxies is not None:
            #         logging.info("[%s] %s's cyq proxies = (%0.4f,%0.4f,%0.4f,%0.4f,%0.4f)" % (calc_date.strftime('%Y-%m-%d'), stock_info.symbol, cyq_proxies['arc'], cyq_proxies['vrc'], cyq_proxies['src'], cyq_proxies['krc'], cyq_proxies['next_ret']))
            #         # cyq_proxies['date'] = trading_day
            #         cyq_proxies['id'] = Utils.code_to_symbol(stock_info.symbol)
            #         df_proxies = df_proxies.append(cyq_proxies, ignore_index=True)

            # Compute the chip-distribution proxy variables in parallel with multiple processes
            q = Manager().Queue()  # queue for inter-process communication; holds the factor loadings computed by each process
            p = Pool(4)  # process pool, at most 4 concurrent processes
            for _, stock_info in stock_basics.iterrows():
                p.apply_async(cls._calc_factor_loading_proc,
                              args=(
                                  stock_info.symbol,
                                  calc_date,
                                  q,
                              ))
            p.close()
            p.join()
            while not q.empty():
                cyq_proxies = q.get(True)
                # cyq_proxies['date'] = trading_day
                df_proxies = df_proxies.append(cyq_proxies, ignore_index=True)

            # Save the chip-distribution proxy data
            df_proxies['date'] = trading_day
            proxies_file_path = cls._db_proxies_path + '_%s.csv' % Utils.datetimelike_to_str(
                calc_date, dash=False)
            df_proxies.to_csv(
                proxies_file_path,
                index=False,
                columns=['date', 'id', 'arc', 'vrc', 'src', 'krc', 'next_ret'])

            # Load the chip-distribution proxy data
            # cyq_proxies_path = cls._db_proxies_path + '_%s.csv' % Utils.datetimelike_to_str(calc_date, dash=False)
            # df_proxies = pd.read_csv(cyq_proxies_path, header=0)

            # Compute marc, the proxy-variable weights, and the chip-distribution factor loadings
            marc = df_proxies['arc'].median()
            proxies_weight_file = Path(factor_ct.FACTOR_DB.db_path,
                                       factor_ct.CYQ_CT.proxies_weight_file)
            if proxies_weight_file.exists():
                df_proxies_weight = pd.read_csv(proxies_weight_file,
                                                header=0,
                                                parse_dates=[0])
                df_proxies_weight = df_proxies_weight[
                    df_proxies_weight.date < calc_date].tail(24)
                if len(df_proxies_weight) < 24:
                    with open(proxies_weight_file, 'a', newline='') as f:
                        csv_writer = csv.writer(f)
                        csv_writer.writerow([
                            calc_date.strftime('%Y-%m-%d'), marc, 0, 0, 0, 0, 0
                        ])
                else:
                    df_proxies_data = DataFrame()
                    if marc > 0:
                        df_proxies_weight = df_proxies_weight[
                            df_proxies_weight.marc > 0]
                    elif marc < 0:
                        df_proxies_weight = df_proxies_weight[
                            df_proxies_weight.marc < 0]
                    for _, weight_info in df_proxies_weight.iterrows():
                        proxies_file_path = cls._db_proxies_path + '_%s.csv' % Utils.datetimelike_to_str(
                            weight_info['date'], False)
                        df_proxies_data = df_proxies_data.append(
                            pd.read_csv(proxies_file_path, header=0),
                            ignore_index=True)
                    next_ret = np.array(df_proxies_data['next_ret'])
                    cyq_data = np.array(
                        df_proxies_data[['arc', 'vrc', 'src', 'krc']])
                    cyq_data = sm.add_constant(cyq_data)
                    cyq_model = sm.OLS(next_ret, cyq_data)
                    cyq_result = cyq_model.fit()
                    cyq_weights = np.around(cyq_result.params, 6)
                    with open(proxies_weight_file, 'a', newline='') as f:
                        csv_writer = csv.writer(f)
                        csv_writer.writerow([
                            calc_date.strftime('%Y-%m-%d'), marc,
                            cyq_weights[0], cyq_weights[1], cyq_weights[2],
                            cyq_weights[3], cyq_weights[4]
                        ])
                    # 计算筹码分布因子载荷
                    arr_proxies = np.array(
                        df_proxies[['arc', 'vrc', 'src', 'krc']])
                    arr_weight = np.array([
                        cyq_weights[1], cyq_weights[2], cyq_weights[3],
                        cyq_weights[4]
                    ]).reshape((4, 1))
                    intercept = cyq_weights[0]
                    arr_cyq = np.around(
                        np.dot(arr_proxies, arr_weight) + intercept, 6)
                    dict_cyq = {
                        'date': list(df_proxies['date']),
                        'id': list(df_proxies['id']),
                        'factorvalue': list(arr_cyq.reshape((len(arr_cyq), )))
                    }
                    # 保存因子载荷至因子数据库
                    if save:
                        Utils.factor_loading_persistent(
                            cls._db_file,
                            calc_date.strftime('%Y%m%d'),
                            dict_cyq,
                            columns=['date', 'id', 'factorvalue'])
            else:
                with open(proxies_weight_file, 'w', newline='') as f:
                    csv_writer = csv.writer(f)
                    csv_writer.writerow([
                        'date', 'marc', 'intcpt', 'arc_w', 'vrc_w', 'src_w',
                        'krc_w'
                    ])
                    csv_writer.writerow(
                        [calc_date.strftime('%Y-%m-%d'), marc, 0, 0, 0, 0, 0])
            # 休息200秒
            logging.info('Suspending for 200s.')
            time.sleep(200)
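
The weight-fitting step above regresses pooled next-period returns on the four chip-distribution proxies (arc, vrc, src, krc) plus an intercept, then applies the fitted coefficients to the current cross-section. Below is a minimal standalone sketch of that step on synthetic data, using only numpy/statsmodels; the array names are illustrative, not the project's API.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
proxies = rng.normal(size=(500, 4))                 # stand-ins for the ['arc', 'vrc', 'src', 'krc'] columns
true_w = np.array([0.02, -0.01, 0.005, 0.0])
next_ret = 0.001 + proxies @ true_w + rng.normal(scale=0.02, size=500)

X = sm.add_constant(proxies)                        # prepend the intercept column
cyq_weights = np.around(sm.OLS(next_ret, X).fit().params, 6)   # [intcpt, arc_w, vrc_w, src_w, krc_w]

# applying the weights to a cross-section of proxies mirrors the factor-loading step
arr_weight = cyq_weights[1:].reshape((4, 1))
arr_cyq = np.around(proxies @ arr_weight + cyq_weights[0], 6)
print(cyq_weights, arr_cyq[:3].ravel())
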
Пример #21
0
    def _calc_factor_loading(cls, code, calc_date):
        """
        计算指定日期、指定个股APM因子的stat统计量
        Parameters:
        --------
        :param code: string
            个股代码,如600000
        :param calc_date: datetime-like, str
            因子载荷计算日期,格式YYYY-MM-DD
        :return: float
        --------
            stat统计量,计算APM因子载荷的中间变量
        """
        # 1.取得过去40个交易日序列,交易日按降序排列
        calc_date = Utils.to_date(calc_date)
        trading_days = Utils.get_trading_days(end=calc_date,
                                              ndays=40,
                                              ascending=False)

        # 2.取得个股及指数过去__days+1个交易日每个交易日的开盘价、中午收盘价和当天收盘价
        #   开盘价为09:31分钟线的开盘价,中午收盘价为11:30分钟线的收盘价,当天收盘价为15:00分钟线的收盘价
        #   返回的数据格式为DataFrame,columns=['date','open','mid_close','close'],按日期升序排列
        # secu_mkt_data = DataFrame()
        # index_mkt_data = DataFrame()
        # mkt_data_header = ['date', 'open', 'mid_close', 'close']
        # k = 0
        # for trading_day in trading_days:
        #     df_1min_data = Utils.get_min_mkt(Utils.code_to_symbol(code), trading_day, fq=True)
        #     if df_1min_data is not None:
        #         str_date = Utils.datetimelike_to_str(trading_day)
        #         fopen = df_1min_data[df_1min_data.datetime == '%s 09:31:00' % str_date].iloc[0].open
        #         fmid_close = df_1min_data[df_1min_data.datetime == '%s 11:30:00' % str_date].iloc[0].close
        #         fclose = df_1min_data[df_1min_data.datetime == '%s 15:00:00' % str_date].iloc[0].close
        #         secu_mkt_data = secu_mkt_data.append(
        #             Series([str_date, fopen, fmid_close, fclose], index=mkt_data_header), ignore_index=True)
        #
        #         df_1min_data = Utils.get_min_mkt(factor_ct.APM_CT.index_code, trading_day, index=True, fq=True)
        #         fopen = df_1min_data[df_1min_data.datetime == '%s 09:31:00' % str_date].iloc[0].open
        #         fmid_close = df_1min_data[df_1min_data.datetime == '%s 11:30:00' % str_date].iloc[0].close
        #         fclose = df_1min_data[df_1min_data.datetime == '%s 15:00:00' % str_date].iloc[0].close
        #         index_mkt_data = index_mkt_data.append(
        #             Series([str_date, fopen, fmid_close, fclose], index=mkt_data_header), ignore_index=True)
        #         k += 1
        #         if k > cls.__days:
        #             break
        # if k <= cls.__days:
        #     return None
        # secu_mkt_data = secu_mkt_data.sort_values(by='date')
        # secu_mkt_data = secu_mkt_data.reset_index(drop=True)
        # index_mkt_data = index_mkt_data.sort_values(by='date')
        # index_mkt_data = index_mkt_data.reset_index(drop=True)
        # #  3.计算个股及指数的上午收益率数组r_t^{am},R_t^{am}和下午收益率数组r_t^{pm},R_t^{pm},并拼接为一个数组
        # #    拼接后的收益率数组,上半部分为r_t^{am} or R_t^{am},下半部分为r_t^{pm} or R_t^{pm}
        # r_am_array = np.zeros((cls.__days, 1))
        # r_pm_array = np.zeros((cls.__days, 1))
        # for ind in secu_mkt_data.index[1:]:
        #     r_am_array[ind-1, 0] = secu_mkt_data.loc[ind, 'mid_close'] / secu_mkt_data.loc[ind-1, 'close'] - 1.0
        #     r_pm_array[ind-1, 0] = secu_mkt_data.loc[ind, 'close'] / secu_mkt_data.loc[ind, 'mid_close'] - 1.0
        # r_apm_array = np.concatenate((r_am_array, r_pm_array), axis=0)
        #
        # R_am_array = np.zeros((cls.__days, 1))
        # R_pm_array = np.zeros((cls.__days, 1))
        # for ind in index_mkt_data.index[1:]:
        #     R_am_array[ind-1, 0] = index_mkt_data.loc[ind, 'mid_close'] / index_mkt_data.loc[ind-1, 'close'] - 1.0
        #     R_pm_array[ind-1, 0] = index_mkt_data.loc[ind, 'close'] / index_mkt_data.loc[ind, 'mid_close'] - 1.0
        # R_apm_array = np.concatenate((R_am_array, R_pm_array), axis=0)

        # 遍历交易日序列,计算个股及指数的上午收益率(r_am_array,R_am_array)和下午收益率序列(r_pm_array,R_pm_array)
        r_am_array = np.zeros((cls.__days, 1))
        r_pm_array = np.zeros((cls.__days, 1))
        R_am_array = np.zeros((cls.__days, 1))
        R_pm_array = np.zeros((cls.__days, 1))
        k = 0
        for trading_day in trading_days:
            df_1min_data = Utils.get_min_mkt(Utils.code_to_symbol(code),
                                             trading_day,
                                             fq=True)
            if df_1min_data is not None:
                str_date = Utils.datetimelike_to_str(trading_day)
                fopen = df_1min_data[df_1min_data.datetime == '%s 09:31:00' %
                                     str_date].iloc[0].open
                fmid_close = df_1min_data[df_1min_data.datetime ==
                                          '%s 11:30:00' %
                                          str_date].iloc[0].close
                fclose = df_1min_data[df_1min_data.datetime == '%s 15:00:00' %
                                      str_date].iloc[0].close
                r_am_array[k, 0] = fmid_close / fopen - 1.0
                r_pm_array[k, 0] = fclose / fmid_close - 1.0

                df_1min_data = Utils.get_min_mkt(factor_ct.APM_CT.index_code,
                                                 trading_day,
                                                 index=True,
                                                 fq=True)
                fopen = df_1min_data[df_1min_data.datetime == '%s 09:31:00' %
                                     str_date].iloc[0].open
                fmid_close = df_1min_data[df_1min_data.datetime ==
                                          '%s 11:30:00' %
                                          str_date].iloc[0].close
                fclose = df_1min_data[df_1min_data.datetime == '%s 15:00:00' %
                                      str_date].iloc[0].close
                R_am_array[k, 0] = fmid_close / fopen - 1.0
                R_pm_array[k, 0] = fclose / fmid_close - 1.0

                k += 1
                if k == cls.__days:
                    break
        if k < cls.__days:
            return None
        r_apm_array = np.concatenate((r_am_array, r_pm_array), axis=0)
        R_apm_array = np.concatenate((R_am_array, R_pm_array), axis=0)

        # 4.个股收益率数组相对于指数收益率进行线性回归
        #   将指数收益率数组添加常数项
        R_apm_array = sm.add_constant(R_apm_array)
        #   线性回归:r_i = \alpha + \beta * R_i + \epsilon_i
        stat_model = sm.OLS(r_apm_array, R_apm_array)
        stat_result = stat_model.fit()
        resid_array = stat_result.resid.reshape((cls.__days * 2, 1))  # 回归残差数组
        # 5.计算stat统计量
        #   以上得到的__days*2个残差\epsilon_i中,属于上午的记为\epsilon_i^{am},属于下午的记为\epsilon_i^{pm},计算每日上午与
        #   下午残差的差值:$\delta_t = \epsilon_t^{am} - \epsilon_t^{pm}$,为了衡量上午与下午残差的差异程度,设计统计量:
        #   $stat = \frac{\mu(\delta_t)}{\sigma(\delta_t)\sqrt{N}}$,其中\mu为均值,\sigma为标准差,N=__days,总的来说
        #   统计量stat反映了剔除市场影响后股价行为上午与下午的差异程度。stat数值大(小)于0越多,则股票在上午的表现越好(差)于下午。
        delta_array = resid_array[:cls.__days] - resid_array[
            cls.__days:]  # 上午与下午的残差差值
        delta_avg = np.mean(delta_array)  # 残差差值的均值
        delta_std = np.std(delta_array)  # 残差差值的标准差
        # 如果残差差值的标准差接近于0,返回None
        if np.fabs(delta_std) < 0.0001:
            return None
        stat = delta_avg / delta_std / np.sqrt(cls.__days)
        # logging.info('%s, stat = %.6f' % (code, stat))
        return stat
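
A self-contained sketch of the stat statistic computed above, assuming the residual vector is laid out as in the regression (the first __days entries are morning residuals, the last __days are afternoon residuals); the helper name is illustrative only.

import numpy as np

def apm_stat(resid_array, n_days):
    """stat = mean(delta) / std(delta) / sqrt(N), delta = morning residual - afternoon residual."""
    resid_array = np.asarray(resid_array, dtype=float).reshape((2 * n_days, 1))
    delta_array = resid_array[:n_days] - resid_array[n_days:]
    delta_std = np.std(delta_array)
    if np.fabs(delta_std) < 0.0001:        # degenerate case handled the same way as above
        return None
    return float(np.mean(delta_array) / delta_std / np.sqrt(n_days))

# usage with toy residuals for N = 3 days
print(apm_stat([0.010, -0.020, 0.005, -0.010, 0.020, -0.005], 3))
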
Пример #22
0
    def calc_factor_loading(cls, start_date, end_date=None, month_end=True, save=False, **kwargs):
        """
        计算指定日期的样本个股的因子载荷, 并保存至因子数据库
        Parameters:
        --------
        :param start_date: datetime-like, str
            开始日期, 格式: YYYY-MM-DD or YYYYMMDD
        :param end_date: datetime-like, str
            结束日期, 如果为None, 则只计算start_date日期的因子载荷, 格式: YYYY-MM-DD or YYYYMMDD
        :param month_end: bool, 默认为True
            如果为True, 则只计算月末时点的因子载荷
        :param save: bool, 默认True
            是否保存至因子数据库
        :param kwargs:
        :return: dict
            因子载荷
        """
        # 取得交易日序列及股票基本信息表
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date, end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date, ndays=1)
        # all_stock_basics = CDataHandler.DataApi.get_secu_basics()
        # 遍历交易日序列, 计算筹码分布因子载荷
        dict_beta = {}
        dict_hsigma = {}
        for calc_date in trading_days_series:
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            logging.info('[%s] Calc BETA factor loading.' % Utils.datetimelike_to_str(calc_date))
            # 遍历个股, 计算个股BETA因子值
            # s = (calc_date - datetime.timedelta(days=risk_ct.DBETA_CT.listed_days)).strftime('%Y%m%d')
            # stock_basics = all_stock_basics[all_stock_basics.list_date < s]
            s = calc_date - datetime.timedelta(days=risk_ct.DBETA_CT.listed_days)
            stock_basics = Utils.get_stock_basics(s, False)
            ids = []        # 个股代码list
            betas = []      # BETA因子值
            hsigmas = []    # HSIGMA因子值

            if 'multi_proc' not in kwargs:
                kwargs['multi_proc'] = False
            if not kwargs['multi_proc']:
                # 采用单进程计算BETA因子和HSIGMA因子值,
                for _, stock_info in stock_basics.iterrows():
                    logging.debug("[%s] Calc %s's BETA and HSIGMA factor data." % (calc_date.strftime('%Y-%m-%d'), stock_info.symbol))
                    beta_data = cls._calc_factor_loading(stock_info.symbol, calc_date)
                    if beta_data is None:
                        ids.append(Utils.code_to_symbol(stock_info.symbol))
                        betas.append(np.nan)
                        hsigmas.append(np.nan)
                    else:
                        ids.append(beta_data['code'])
                        betas.append(beta_data['beta'])
                        hsigmas.append(beta_data['hsigma'])
            else:
                # 采用多进程并行计算BETA因子和HSIGMA因子值
                q = Manager().Queue()   # 队列, 用于进程间通信, 存储每个进程计算的因子载荷
                p = Pool(SETTINGS.CONCURRENCY_KERNEL_NUM)             # 进程池, 进程数由SETTINGS.CONCURRENCY_KERNEL_NUM指定
                for _, stock_info in stock_basics.iterrows():
                    p.apply_async(cls._calc_factor_loading_proc, args=(stock_info.symbol, calc_date, q,))
                p.close()
                p.join()
                while not q.empty():
                    beta_data = q.get(True)
                    ids.append(beta_data['code'])
                    betas.append(beta_data['beta'])
                    hsigmas.append(beta_data['hsigma'])

            date_label = Utils.get_trading_days(calc_date, ndays=2)[1]
            dict_beta = {'date': [date_label]*len(ids), 'id': ids, 'factorvalue': betas}
            dict_hsigma = {'date': [date_label]*len(ids), 'id': ids, 'factorvalue': hsigmas}
            if save:
                Utils.factor_loading_persistent(cls._db_file, Utils.datetimelike_to_str(calc_date, dash=False), dict_beta, ['date', 'id', 'factorvalue'])
                hsigma_path = os.path.join(factor_ct.FACTOR_DB.db_path, risk_ct.HSIGMA_CT.db_file)
                Utils.factor_loading_persistent(hsigma_path, Utils.datetimelike_to_str(calc_date, dash=False), dict_hsigma, ['date', 'id', 'factorvalue'])
            # 休息180秒
            # logging.info('Suspending for 180s.')
            # time.sleep(180)
        return dict_beta
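
The multi-process branch above relies on a Pool plus a Manager().Queue(): workers push per-stock results onto the shared queue, and the parent drains it after join(). A stripped-down sketch of that pattern with a dummy worker standing in for cls._calc_factor_loading_proc:

from multiprocessing import Pool, Manager

def _calc_factor_loading_proc(symbol, calc_date, q):
    # dummy result in the same shape the BETA branch expects
    q.put({'code': symbol, 'beta': 1.0, 'hsigma': 0.02})

if __name__ == '__main__':
    q = Manager().Queue()      # a managed queue is picklable, so it can be passed to pool workers
    p = Pool(4)
    for symbol in ['600000', '600519', '000001']:
        p.apply_async(_calc_factor_loading_proc, args=(symbol, '2018-12-28', q,))
    p.close()
    p.join()
    ids, betas, hsigmas = [], [], []
    while not q.empty():
        beta_data = q.get(True)
        ids.append(beta_data['code'])
        betas.append(beta_data['beta'])
        hsigmas.append(beta_data['hsigma'])
    print(ids, betas, hsigmas)
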
Пример #23
0
    def calc_factor_loading(cls, start_date, end_date=None, month_end=True, save=False, **kwargs):
        """
        计算指定日期的样本个股的因子载荷, 并保存至因子数据库
        Parameters:
        --------
        :param start_date: datetime-like, str
            开始日期, 格式: YYYY-MM-DD or YYYYMMDD
        :param end_date: datetime-like, str
            结束日期, 如果为None, 则只计算start_date日期的因子载荷, 格式: YYYY-MM-DD or YYYYMMDD
        :param month_end: bool, 默认为True
            如果为True, 则只计算月末时点的因子载荷
        :param save: bool, 默认为True
            是否保存至因子数据库
        :param kwargs:
            'multi_proc': bool, True=采用多进程, False=采用单进程, 默认为False
        :return: dict
            因子载荷
        """
        # 取得交易日序列及股票基本信息表
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date, end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date, ndays=1)
        all_stock_basics = CDataHandler.DataApi.get_secu_basics()
        # 遍历交易日序列, 计算DASTD因子载荷
        dict_dastd = None
        for calc_date in trading_days_series:
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            logging.info('[%s] Calc DASTD factor loading.' % Utils.datetimelike_to_str(calc_date))
            # 遍历个股, 计算个股的DASTD因子值
            s = (calc_date - datetime.timedelta(days=risk_ct.DASTD_CT.listed_days)).strftime('%Y%m%d')
            stock_basics = all_stock_basics[all_stock_basics.list_date < s]
            ids = []        # 个股代码list
            dastds = []     # DASTD因子值list

            if 'multi_proc' not in kwargs:
                kwargs['multi_proc'] = False
            if not kwargs['multi_proc']:
                # 采用单进程计算DASTD因子值
                for _, stock_info in stock_basics.iterrows():
                    logging.info("[%s] Calc %s's DASTD factor loading." % (calc_date.strftime('%Y-%m-%d'), stock_info.symbol))
                    dastd_data = cls._calc_factor_loading(stock_info.symbol, calc_date)
                    if dastd_data is None:
                        ids.append(Utils.code_to_symbol(stock_info.symbol))
                        dastds.append(np.nan)
                    else:
                        ids.append(dastd_data['code'])
                        dastds.append(dastd_data['dastd'])
            else:
                # 采用多进程并行计算DASTD因子值
                q = Manager().Queue()   # 队列, 用于进程间通信, 存储每个进程计算的因子载荷
                p = Pool(4)             # 进程池, 最多同时开启4个进程
                for _, stock_info in stock_basics.iterrows():
                    p.apply_async(cls._calc_factor_loading_proc, args=(stock_info.symbol, calc_date, q,))
                p.close()
                p.join()
                while not q.empty():
                    dastd_data = q.get(True)
                    ids.append(dastd_data['code'])
                    dastds.append(dastd_data['dastd'])

            date_label = Utils.get_trading_days(start=calc_date, ndays=2)[1]
            dict_dastd = {'date': [date_label]*len(ids), 'id': ids, 'factorvalue': dastds}
            if save:
                Utils.factor_loading_persistent(cls._db_file, Utils.datetimelike_to_str(calc_date, dash=False), dict_dastd, ['date', 'id', 'factorvalue'])
            # 暂停180秒
            # logging.info('Suspending for 180s.')
            # time.sleep(180)
        return dict_dastd
Пример #24
0
 def _calc_factor_loading(cls, code, calc_date):
     """
     计算指定日期、指定个股BETA因子载荷
     Parameters:
     --------
     :param code: str
         个股代码, 如600000或SH600000
     :param calc_date: datetime-like, str
         计算日期, 格式YYYY-MM-DD
     :return: pd.Series
     --------
         个股的BETA因子和HSIGMA因子载荷
         0. code: 个股代码
         1. beta: BETA因子载荷
         2. hsigma: HSIGMA因子载荷
         若计算失败, 返回None
     """
     # 取得个股复权行情数据
     df_secu_quote = Utils.get_secu_daily_mkt(code, end=calc_date, ndays=risk_ct.DBETA_CT.trailing+1, fq=True)
     if df_secu_quote is None:
         return None
     # 如果行情数据长度小于半年(126个交易日), 那么返回None
     if len(df_secu_quote) < 126:
         return None
     # 如果读取的行情数据起始日距离计算日期大于trailing的3倍, 返回None
     s = Utils.to_date(calc_date) - datetime.timedelta(days=risk_ct.DBETA_CT.trailing*3)
     if Utils.to_date(df_secu_quote.iloc[0]['date']) < s:
         return None
     df_secu_quote.reset_index(drop=True, inplace=True)
     # 取得基准复权行情数据
     benchmark_code = risk_ct.DBETA_CT.benchmark
     df_benchmark_quote = Utils.get_secu_daily_mkt(benchmark_code, end=calc_date, fq=True)
     if df_benchmark_quote is None:
         return None
     df_benchmark_quote = df_benchmark_quote[df_benchmark_quote['date'].isin(list(df_secu_quote['date']))]
     if len(df_benchmark_quote) != len(df_secu_quote):
         raise ValueError("[beta计算]基准和个股的历史行情长度不一致.")
     df_benchmark_quote.reset_index(drop=True, inplace=True)
     # 计算个股和基准的日收益率序列
     arr_secu_close = np.array(df_secu_quote.iloc[1:]['close'])
     arr_secu_preclose = np.array(df_secu_quote.shift(1).iloc[1:]['close'])
     arr_secu_daily_ret = arr_secu_close / arr_secu_preclose - 1.
     arr_benchmark_close = np.array(df_benchmark_quote.iloc[1:]['close'])
     arr_benchmark_preclose = np.array(df_benchmark_quote.shift(1).iloc[1:]['close'])
     arr_benchmark_daily_ret = arr_benchmark_close / arr_benchmark_preclose - 1.
     # 计算权重(指数移动加权平均)
     T = len(arr_benchmark_daily_ret)
     # time_spans = sorted(range(T), reverse=True)
     # alpha = 1 - np.exp(np.log(0.5)/risk_ct.DBETA_CT.half_life)
     # x = [1-alpha] * T
     # y = [alpha] * (T-1)
     # y.insert(0, 1)
     # weights = np.float_power(x, time_spans) * y
     weights = Algo.ewma_weight(T, risk_ct.DBETA_CT.half_life)
     # 采用加权最小二乘法计算Beta因子载荷及hsigma
     arr_benchmark_daily_ret = sm.add_constant(arr_benchmark_daily_ret)
     cap_model = sm.WLS(arr_secu_daily_ret, arr_benchmark_daily_ret, weights=weights)
     result = cap_model.fit()
     beta = result.params[1]
     hsigma = np.sqrt(result.mse_resid)
     return pd.Series([Utils.code_to_symbol(code), beta, hsigma], index=['code', 'beta', 'hsigma'])
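
Algo.ewma_weight is project code; the sketch below reproduces an equivalent half-life weighting (mirroring the commented-out formula above) and the WLS fit that yields beta (slope) and hsigma (residual volatility), on synthetic return series. Names and the half-life value are illustrative.

import numpy as np
import statsmodels.api as sm

def ewma_weights(n_obs, half_life):
    # weight_t = (1 - alpha)^(age), with the newest observation scaled by alpha
    alpha = 1.0 - np.exp(np.log(0.5) / half_life)
    spans = np.arange(n_obs - 1, -1, -1)                    # ages, oldest -> newest
    head = np.concatenate(([1.0], np.full(n_obs - 1, alpha)))
    return np.float_power(1.0 - alpha, spans) * head

rng = np.random.default_rng(1)
bench_ret = rng.normal(scale=0.01, size=252)
secu_ret = 0.0002 + 1.2 * bench_ret + rng.normal(scale=0.015, size=252)

X = sm.add_constant(bench_ret)
res = sm.WLS(secu_ret, X, weights=ewma_weights(len(bench_ret), half_life=63)).fit()
beta, hsigma = res.params[1], np.sqrt(res.mse_resid)
print(round(beta, 4), round(hsigma, 4))
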
Пример #25
0
    def calc_factor_loading(cls,
                            start_date,
                            end_date=None,
                            month_end=True,
                            save=False,
                            **kwargs):
        """
        计算指定日期的样本个股的因子载荷,并保存至因子数据库
        Parameters
        --------
        :param start_date: datetime-like, str
            开始日期,格式:YYYY-MM-DD or YYYYMMDD
        :param end_date: datetime-like, str
            结束日期,如果为None,则只计算start_date日期的因子载荷,格式:YYYY-MM-DD or YYYYMMDD
        :param month_end: bool,默认True
            如果为True,则只计算月末时点的因子载荷
        :param save: bool,默认False
            是否保存至因子数据库
        :param kwargs['synthetic_factor']: bool, 默认为False
            是否计算合成因子
        :return: 因子载荷,DataFrame
        --------
            因子载荷,DataFrame
            0. date: 日期
            1. id: 证券symbol
            2. m0: 隔夜时段动量
            3. m1: 第一个小时动量
            4. m2: 第二个小时动量
            5. m3: 第三个小时动量
            6. m4: 第四个小时动量
            7. m_normal: 传统动量
        """
        # 取得交易日序列及股票基本信息表
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date,
                                                         end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date,
                                                         ndays=1)
        all_stock_basics = CDataHandler.DataApi.get_secu_basics()
        # 遍历交易日序列,计算日内动量因子值
        dict_intraday_momentum = None
        for calc_date in trading_days_series:
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            logging.info(
                '[%s] Calc intraday momentum factor loading.' %
                Utils.datetimelike_to_str(calc_date))
            if 'synthetic_factor' in kwargs and kwargs[
                    'synthetic_factor']:  # 计算日内合成动量因子
                dict_intraday_momentum = {
                    'date': [],
                    'id': [],
                    'factorvalue': []
                }
                # 读取日内个时段动量因子值
                df_factor_loading = Utils.read_factor_loading(
                    cls._db_file, Utils.datetimelike_to_str(calc_date, False))
                if df_factor_loading.shape[0] <= 0:
                    logging.info(
                        "[%s] It doesn't exist intraday momentum factor loading."
                        % Utils.datetimelike_to_str(calc_date))
                    return
                df_factor_loading.fillna(0, inplace=True)
                # 读取因子最优权重
                factor_weight = cls.get_factor_weight(calc_date)
                if factor_weight is None:
                    logging.info("[%s] It doesn't exist factor weight.")
                    return
                # 计算合成动量因子
                arr_factor_loading = np.array(
                    df_factor_loading[['m0', 'm1', 'm2', 'm3', 'm4']])
                arr_factor_weight = np.array(
                    factor_weight.drop('date')).reshape((5, 1))
                arr_synthetic_factor = np.dot(arr_factor_loading,
                                              arr_factor_weight)
                # arr_synthetic_factor = np.around(arr_synthetic_factor, 6)
                dict_intraday_momentum['date'] = list(
                    df_factor_loading['date'])
                dict_intraday_momentum['id'] = list(df_factor_loading['id'])
                dict_intraday_momentum['factorvalue'] = list(
                    arr_synthetic_factor.astype(float).round(6).reshape(
                        (arr_synthetic_factor.shape[0], )))
                # 保存合成因子
                if save:
                    synthetic_db_file = os.path.join(
                        factor_ct.FACTOR_DB.db_path,
                        factor_ct.INTRADAYMOMENTUM_CT.synthetic_db_file)
                    Utils.factor_loading_persistent(
                        synthetic_db_file,
                        Utils.datetimelike_to_str(calc_date, False),
                        dict_intraday_momentum)
            else:  # 计算日内各时段动量因子
                dict_intraday_momentum = {
                    'date': [],
                    'id': [],
                    'm0': [],
                    'm1': [],
                    'm2': [],
                    'm3': [],
                    'm4': [],
                    'm_normal': []
                }
                # 遍历个股,计算个股日内动量值
                s = (calc_date -
                     datetime.timedelta(days=90)).strftime('%Y%m%d')
                stock_basics = all_stock_basics[all_stock_basics.list_date < s]

                # 采用单进程进行计算
                # for _, stock_info in stock_basics.iterrows():
                #     momentum_data = cls._calc_factor_loading(stock_info.symbol, calc_date)
                #     if momentum_data is not None:
                #         logging.info("[%s] %s's intraday momentum = (%0.4f,%0.4f,%0.4f,%0.4f,%0.4f,%0.4f)" % (calc_date.strftime('%Y-%m-%d'),stock_info.symbol, momentum_data.m0, momentum_data.m1, momentum_data.m2, momentum_data.m3, momentum_data.m4, momentum_data.m_normal))
                #         dict_intraday_momentum['id'].append(Utils.code_to_symbol(stock_info.symbol))
                #         dict_intraday_momentum['m0'].append(round(momentum_data.m0, 6))
                #         dict_intraday_momentum['m1'].append(round(momentum_data.m1, 6))
                #         dict_intraday_momentum['m2'].append(round(momentum_data.m2, 6))
                #         dict_intraday_momentum['m3'].append(round(momentum_data.m3, 6))
                #         dict_intraday_momentum['m4'].append(round(momentum_data.m4, 6))
                #         dict_intraday_momentum['m_normal'].append(round(momentum_data.m_normal, 6))

                # 采用多进程并行计算日内动量因子载荷
                q = Manager().Queue()  # 队列,用于进程间通信,存储每个进程计算的因子载荷
                p = Pool(4)  # 进程池,最多同时开启4个进程
                for _, stock_info in stock_basics.iterrows():
                    p.apply_async(cls._calc_factor_loading_proc,
                                  args=(
                                      stock_info.symbol,
                                      calc_date,
                                      q,
                                  ))
                p.close()
                p.join()
                while not q.empty():
                    momentum_data = q.get(True)
                    dict_intraday_momentum['id'].append(momentum_data[0])
                    dict_intraday_momentum['m0'].append(
                        round(momentum_data[1], 6))
                    dict_intraday_momentum['m1'].append(
                        round(momentum_data[2], 6))
                    dict_intraday_momentum['m2'].append(
                        round(momentum_data[3], 6))
                    dict_intraday_momentum['m3'].append(
                        round(momentum_data[4], 6))
                    dict_intraday_momentum['m4'].append(
                        round(momentum_data[5], 6))
                    dict_intraday_momentum['m_normal'].append(
                        round(momentum_data[6], 6))

                date_label = Utils.get_trading_days(calc_date, ndays=2)[1]
                dict_intraday_momentum['date'] = [date_label] * len(
                    dict_intraday_momentum['id'])
                # 保存因子载荷至因子数据库
                if save:
                    Utils.factor_loading_persistent(
                        cls._db_file, calc_date.strftime('%Y%m%d'),
                        dict_intraday_momentum)
                # 休息360秒
                logging.info('Suspending for 360s.')
                time.sleep(360)
        return dict_intraday_momentum
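
The synthetic branch above is just a weighted sum of the five intraday legs followed by rounding; a tiny sketch with made-up loadings and weights (the real weights come from cls.get_factor_weight):

import numpy as np
import pandas as pd

df_factor_loading = pd.DataFrame(np.random.default_rng(4).normal(size=(6, 5)),
                                 columns=['m0', 'm1', 'm2', 'm3', 'm4'])
arr_factor_weight = np.array([0.3, 0.2, 0.2, 0.2, 0.1]).reshape((5, 1))   # illustrative weights
arr_synthetic_factor = np.dot(np.array(df_factor_loading), arr_factor_weight)
factorvalue = list(arr_synthetic_factor.astype(float).round(6).reshape((arr_synthetic_factor.shape[0],)))
print(factorvalue)
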
Пример #26
0
    def calc_factor_loading(cls,
                            start_date,
                            end_date=None,
                            month_end=True,
                            save=False,
                            **kwargs):
        """
        计算指定日期的样本个股的因子载荷, 并保存至因子数据库
        Parameters:
        --------
        :param start_date: datetime-like, str
            开始日期, 格式: YYYY-MM-DD or YYYYMMDD
        :param end_date: datetime-like, str
            结束日期, 如果为None, 则只计算start_date日期的因子载荷, 格式: YYYY-MM-DD or YYYYMMDD
        :param month_end: bool, 默认为True
            如果为True, 则只计算月末时点的因子载荷
        :param save: bool, 默认为True
            是否保存至因子数据库
        :param kwargs:
            'multi_proc': bool, True=采用多进程, False=采用单进程, 默认为False
        :return: dict
            因子载荷数据
        """
        # 取得交易日序列及股票基本信息表
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date,
                                                         end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date,
                                                         ndays=1)
        all_stock_basics = CDataHandler.DataApi.get_secu_basics()
        # 遍历交易日序列, 计算LIQUIDITY因子载荷
        dict_raw_liquidity = None
        for calc_date in trading_days_series:
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            dict_stom = None
            dict_stoq = None
            dict_stoa = None
            dict_raw_liquidity = None
            logging.info('[%s] Calc LIQUIDITY factor loading.' %
                         Utils.datetimelike_to_str(calc_date))
            # 遍历个股,计算个股LIQUIDITY因子值
            s = (calc_date - datetime.timedelta(
                days=risk_ct.LIQUID_CT.listed_days)).strftime('%Y%m%d')
            stock_basics = all_stock_basics[all_stock_basics.list_date < s]
            ids = []
            stoms = []
            stoqs = []
            stoas = []
            raw_liquidities = []

            if 'multi_proc' not in kwargs:
                kwargs['multi_proc'] = False
            if not kwargs['multi_proc']:
                # 采用单进程计算LIQUIDITY因子值
                for _, stock_info in stock_basics.iterrows():
                    logging.info("[%s] Calc %s's LIQUIDITY factor loading." %
                                 (Utils.datetimelike_to_str(
                                     calc_date, dash=True), stock_info.symbol))
                    liquidity_data = cls._calc_factor_loading(
                        stock_info.symbol, calc_date)
                    if liquidity_data is not None:
                        ids.append(liquidity_data['code'])
                        stoms.append(liquidity_data['stom'])
                        stoqs.append(liquidity_data['stoq'])
                        stoas.append(liquidity_data['stoa'])
                        raw_liquidities.append(liquidity_data['liquidity'])
            else:
                # 采用多进程计算LIQUIDITY因子值
                q = Manager().Queue()
                p = Pool(4)
                for _, stock_info in stock_basics.iterrows():
                    p.apply_async(cls._calc_factor_loading_proc,
                                  args=(
                                      stock_info.symbol,
                                      calc_date,
                                      q,
                                  ))
                p.close()
                p.join()
                while not q.empty():
                    liquidity_data = q.get(True)
                    ids.append(liquidity_data['code'])
                    stoms.append(liquidity_data['stom'])
                    stoqs.append(liquidity_data['stoq'])
                    stoas.append(liquidity_data['stoa'])
                    raw_liquidities.append(liquidity_data['liquidity'])

            date_label = Utils.get_trading_days(start=calc_date, ndays=2)[1]
            dict_stom = dict({
                'date': [date_label] * len(ids),
                'id': ids,
                'factorvalue': stoms
            })
            dict_stoq = dict({
                'date': [date_label] * len(ids),
                'id': ids,
                'factorvalue': stoqs
            })
            dict_stoa = dict({
                'date': [date_label] * len(ids),
                'id': ids,
                'factorvalue': stoas
            })
            dict_raw_liquidity = dict({
                'date': [date_label] * len(ids),
                'id': ids,
                'factorvalue': raw_liquidities
            })
            # 读取Size因子值, 将流动性因子与Size因子正交化
            size_factor_path = os.path.join(factor_ct.FACTOR_DB.db_path,
                                            risk_ct.SIZE_CT.db_file)
            df_size = Utils.read_factor_loading(
                size_factor_path,
                Utils.datetimelike_to_str(calc_date, dash=False))
            df_size.drop(columns='date', inplace=True)
            df_size.rename(columns={'factorvalue': 'size'}, inplace=True)
            df_liquidity = pd.DataFrame(
                dict({
                    'id': ids,
                    'liquidity': raw_liquidities
                }))
            df_liquidity = pd.merge(left=df_liquidity,
                                    right=df_size,
                                    how='inner',
                                    on='id')
            arr_liquidity = Utils.normalize_data(
                Utils.clean_extreme_value(
                    np.array(df_liquidity['liquidity']).reshape(
                        (len(df_liquidity), 1))))
            arr_size = Utils.normalize_data(
                Utils.clean_extreme_value(
                    np.array(df_liquidity['size']).reshape(
                        (len(df_liquidity), 1))))
            model = sm.OLS(arr_liquidity, arr_size)
            results = model.fit()
            df_liquidity['liquidity'] = results.resid
            df_liquidity.drop(columns='size', inplace=True)
            df_liquidity.rename(columns={'liquidity': 'factorvalue'},
                                inplace=True)
            df_liquidity['date'] = date_label
            # 保存因子载荷
            if save:
                str_date = Utils.datetimelike_to_str(calc_date, dash=False)
                factor_header = ['date', 'id', 'factorvalue']
                Utils.factor_loading_persistent(cls._db_file,
                                                'stom_{}'.format(str_date),
                                                dict_stom, factor_header)
                Utils.factor_loading_persistent(cls._db_file,
                                                'stoq_{}'.format(str_date),
                                                dict_stoq, factor_header)
                Utils.factor_loading_persistent(cls._db_file,
                                                'stoa_{}'.format(str_date),
                                                dict_stoa, factor_header)
                Utils.factor_loading_persistent(
                    cls._db_file, 'rawliquidity_{}'.format(str_date),
                    dict_raw_liquidity, factor_header)
                Utils.factor_loading_persistent(cls._db_file, str_date,
                                                df_liquidity.to_dict('list'),
                                                factor_header)

            # 暂停180秒
            logging.info('Suspending for 180s.')
            time.sleep(180)
        return dict_raw_liquidity
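
A minimal sketch of the size-orthogonalisation step above: winsorise and standardise both series, regress liquidity on size (no intercept, matching the code), and keep the residual as the size-neutral liquidity exposure. The clean/normalize helpers here are simple stand-ins for Utils.clean_extreme_value and Utils.normalize_data, and the data is synthetic.

import numpy as np
import statsmodels.api as sm

def winsorize(x, n_std=3.0):
    mu, sd = np.mean(x), np.std(x)
    return np.clip(x, mu - n_std * sd, mu + n_std * sd)

def standardize(x):
    return (x - np.mean(x)) / np.std(x)

rng = np.random.default_rng(2)
size = rng.normal(size=300)
liquidity = 0.6 * size + rng.normal(scale=0.5, size=300)     # raw liquidity correlates with size

liq_z = standardize(winsorize(liquidity))
size_z = standardize(winsorize(size))
orthogonal_liquidity = sm.OLS(liq_z, size_z).fit().resid     # residual ~ size-neutral liquidity
print(np.corrcoef(orthogonal_liquidity, size_z)[0, 1])       # ~0 by construction
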
Пример #27
0
 def calc_factorloading(self, start_date, end_date=None):
     """
     计算风险因子的因子载荷
     Parameters:
     --------
     :param start_date: datetime-like, str
         计算开始日期, 格式: YYYY-MM-DD
     :param end_date: datetime-like, str
         计算结束日期, 格式: YYYY-MM-DD
     :return: None
     """
     # 读取交易日序列
     start_date = Utils.to_date(start_date)
     if not end_date is None:
         end_date = Utils.to_date(end_date)
         trading_days_series = Utils.get_trading_days(start=start_date,
                                                      end=end_date)
     else:
         trading_days_series = Utils.get_trading_days(start=start_date,
                                                      ndays=1)
     # 遍历交易日序列, 计算风险因子的因子载荷
     for calc_date in trading_days_series:
         Size.calc_factor_loading(start_date=calc_date,
                                  end_date=None,
                                  month_end=False,
                                  save=True,
                                  multi_proc=True)
         Beta.calc_factor_loading(start_date=calc_date,
                                  end_date=None,
                                  month_end=False,
                                  save=True,
                                  multi_proc=True)
         Momentum.calc_factor_loading(start_date=calc_date,
                                      end_date=None,
                                      month_end=False,
                                      save=True,
                                      multi_proc=True)
         ResVolatility.calc_factor_loading(start_date=calc_date,
                                           end_date=None,
                                           month_end=False,
                                           save=True,
                                           multi_proc=True)
         NonlinearSize.calc_factor_loading(start_date=calc_date,
                                           end_date=None,
                                           month_end=False,
                                           save=True,
                                           multi_proc=True)
         Value.calc_factor_loading(start_date=calc_date,
                                   end_date=None,
                                   month_end=False,
                                   save=True,
                                   multi_proc=True)
         Liquidity.calc_factor_loading(start_date=calc_date,
                                       end_date=None,
                                       month_end=False,
                                       save=True,
                                       multi_proc=True)
         EarningsYield.calc_factor_loading(start_date=calc_date,
                                           end_date=None,
                                           month_end=False,
                                           save=True,
                                           multi_proc=True)
         Growth.calc_factor_loading(start_date=calc_date,
                                    end_date=None,
                                    month_end=False,
                                    save=True,
                                    multi_proc=True)
         Leverage.calc_factor_loading(start_date=calc_date,
                                      end_date=None,
                                      month_end=False,
                                      save=True,
                                      multi_proc=True)
Пример #28
0
    def get_dependent_factors(cls, date):
        """
        计算用于因子提纯的相关性因子值,包括行业、规模、价值、成长、短期动量、长期动量
        Parameters:
        --------
        :param date: datetime-like or str
            日期
        :return: pd.DataFrame
            index为个股代码, columns=[28个申万一级行业,规模(scale),价值(value),成长(growth),短期动量(short_momentum),长期动量(long_momentum)]
        """
        str_date = Utils.to_date(date).strftime('%Y%m%d')
        # 1. 行业因子
        # 1.1. 读取行业分类信息
        df_industry_classify = Utils.get_industry_classify()
        df_industry_classify = df_industry_classify.set_index('id')
        # 1.2. 构建行业分类哑变量
        df_industry_dummies = pd.get_dummies(df_industry_classify['ind_code'])
        # 2. 规模因子
        # 2.1. 读取规模因子
        scale_factor_path = os.path.join(factor_ct.FACTOR_DB.db_path,
                                         factor_ct.SCALE_CT.db_file)
        df_scale_raw = Utils.read_factor_loading(scale_factor_path,
                                                 str_date,
                                                 nan_value=0)
        # 2.2. 规模因子去极值、标准化
        scale_cleaned_arr = Utils.clean_extreme_value(
            np.array(df_scale_raw[['LnLiquidMktCap', 'LnTotalMktCap']]))
        scale_normalized_arr = Utils.normalize_data(scale_cleaned_arr)
        # 2.3. 规模因子降维
        scale_factor_arr = np.mean(scale_normalized_arr, axis=1)
        scale_factor = Series(scale_factor_arr, index=df_scale_raw['id'])
        # 3. 价值因子
        # 3.1. 读取价值因子
        value_factor_path = os.path.join(factor_ct.FACTOR_DB.db_path,
                                         factor_ct.VALUE_CT.db_file)
        df_value_raw = Utils.read_factor_loading(value_factor_path,
                                                 str_date,
                                                 nan_value=0)
        # 3.2. 价值因子去极值、标准化
        value_cleaned_arr = Utils.clean_extreme_value(
            np.array(df_value_raw[['ep_ttm', 'bp_lr', 'ocf_ttm']]))
        value_normalized_arr = Utils.normalize_data(value_cleaned_arr)
        # 3.3. 价值因子降维
        value_factor_arr = np.mean(value_normalized_arr, axis=1)
        value_factor = Series(value_factor_arr, index=df_value_raw['id'])
        # 4. 成长因子
        # 4.1. 读取成长因子
        growth_factor_path = os.path.join(factor_ct.FACTOR_DB.db_path,
                                          factor_ct.GROWTH_CT.db_file)
        df_growth_raw = Utils.read_factor_loading(growth_factor_path,
                                                  str_date,
                                                  nan_value=0)
        # 4.2. 成长因子去极值、标准化
        growth_cleaned_arr = Utils.clean_extreme_value(
            np.array(df_growth_raw[['npg_ttm', 'opg_ttm']]))
        growth_normalized_arr = Utils.normalize_data(growth_cleaned_arr)
        # 4.3. 成长因子降维
        growth_factor_arr = np.mean(growth_normalized_arr, axis=1)
        growth_factor = Series(growth_factor_arr, index=df_growth_raw['id'])
        # 5. 动量因子
        # 5.1. 读取动量因子
        mom_factor_path = os.path.join(factor_ct.FACTOR_DB.db_path,
                                       factor_ct.MOMENTUM_CT.db_file)
        df_mom_raw = Utils.read_factor_loading(mom_factor_path,
                                               str_date,
                                               nan_value=0)
        # 5.2. 动量因子去极值、标准化
        short_term_mom_header = [
            'short_term_' + d
            for d in factor_ct.MOMENTUM_CT.short_term_days.split('|')
        ]
        short_mom_cleaned_arr = Utils.clean_extreme_value(
            np.array(df_mom_raw[short_term_mom_header]))
        short_mom_normalized_arr = Utils.normalize_data(short_mom_cleaned_arr)
        long_term_mom_header = [
            'long_term_' + d
            for d in factor_ct.MOMENTUM_CT.long_term_days.split('|')
        ]
        long_mom_cleaned_arr = Utils.clean_extreme_value(
            np.array(df_mom_raw[long_term_mom_header]))
        long_mom_normalized_arr = Utils.normalize_data(long_mom_cleaned_arr)
        # 5.3. 动量因子降维
        short_mom_arr = np.mean(short_mom_normalized_arr, axis=1)
        short_mom = Series(short_mom_arr, index=df_mom_raw['id'])
        long_mom_arr = np.mean(long_mom_normalized_arr, axis=1)
        long_mom = Series(long_mom_arr, index=df_mom_raw['id'])

        # 拼接除行业因子外的因子
        df_style_factor = pd.concat(
            [scale_factor, value_factor, growth_factor, short_mom, long_mom],
            axis=1,
            keys=['scale', 'value', 'growth', 'short_mom', 'long_mom'],
            join='inner')
        # 再拼接行业因子
        df_dependent_factor = pd.concat([df_industry_dummies, df_style_factor],
                                        axis=1,
                                        join='inner')
        return df_dependent_factor
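
A compact sketch of how the dependent-factor matrix above is assembled: industry dummies built from the classification column, style factors as extra columns, inner-joined on the stock id. The tickers, industry codes and factor values below are made up.

import pandas as pd

classify = pd.DataFrame({'id': ['SH600000', 'SH600519', 'SZ000001'],
                         'ind_code': ['bank', 'food', 'bank']}).set_index('id')
ind_dummies = pd.get_dummies(classify['ind_code'])          # one column per industry

style = pd.DataFrame({'scale': [0.5, -1.2, 0.7],
                      'value': [1.1, 0.3, -0.4]},
                     index=['SH600000', 'SH600519', 'SZ000001'])

dependent = pd.concat([ind_dummies, style], axis=1, join='inner')
print(dependent)
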
Пример #29
0
    def calc_factor_loading(cls,
                            start_date,
                            end_date=None,
                            month_end=True,
                            save=False,
                            **kwargs):
        """
        计算指定日期的样本个股的因子载荷,并保存至因子数据库
        Parameters
        --------
        :param start_date: datetime-like, str
            开始日期
        :param end_date: datetime-like, str,默认None
            结束日期,如果为None,则只计算start_date日期的因子载荷
        :param month_end: bool,默认True
            只计算月末时点的因子载荷
        :param save: 是否保存至因子数据库,默认为False
        :param kwargs:
            'multi_proc': bool, True=采用多进程并行计算, False=采用单进程计算, 默认为False
        :return: 因子载荷,DataFrame
        --------
            因子载荷,DataFrame
            0. date, 日期, 为计算日期的下一个交易日
            1: id, 证券代码
            2: factorvalue, 因子载荷
            如果end_date=None,返回start_date对应的因子载荷数据
            如果end_date!=None,返回最后一天的对应的因子载荷数据
            如果没有计算数据,返回None
        """
        # 0.取得交易日序列
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date,
                                                         end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date,
                                                         ndays=1)
        # 取得样本个股信息
        # all_stock_basics = CDataHandler.DataApi.get_secu_basics()
        # 遍历交易日序列,计算SMartQ因子载荷
        dict_factor = None
        for calc_date in trading_days_series:
            dict_factor = {'id': [], 'factorvalue': []}
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            # 1.获取用于读取分钟行情的交易日列表(过去30天的交易日列表,降序排列)
            # trading_days = _get_trading_days(calc_date, 30)
            # trading_days = Utils.get_trading_days(end=calc_date, ndays=30, ascending=False)
            # 2.取得样本个股信息
            # stock_basics = ts.get_stock_basics()
            s = (calc_date - datetime.timedelta(days=90)).strftime('%Y%m%d')
            stock_basics = Utils.get_stock_basics(s)
            # 3.遍历样本个股代码,计算Smart_Q因子载荷值
            dict_factor = {'date': None, 'id': [], 'factorvalue': []}

            if 'multi_proc' not in kwargs:
                kwargs['multi_proc'] = False
            if not kwargs['multi_proc']:
                # 采用单进程进行计算
                for _, stock_info in stock_basics.iterrows():
                    # code = '%s%s' % ('SH' if code[:2] == '60' else 'SZ', code)
                    factor_loading = cls._calc_factor_loading(
                        stock_info.symbol, calc_date)
                    print(
                        "[%s]Calculating %s's SmartMoney factor loading = %.4f."
                        % (calc_date.strftime('%Y-%m-%d'), stock_info.symbol,
                           -1.0 if factor_loading is None else factor_loading))
                    if factor_loading is not None:
                        # df_factor.ix[code, 'factorvalue'] = factor_loading
                        dict_factor['id'].append(
                            Utils.code_to_symbol(stock_info.symbol))
                        dict_factor['factorvalue'].append(factor_loading)
            else:
                # 采用多进程并行计算SmartQ因子载荷
                q = Manager().Queue()  # 队列,用于进程间通信,存储每个进程计算的因子载荷值
                p = Pool(4)  # 进程池,最多同时开启4个进程
                for _, stock_info in stock_basics.iterrows():
                    p.apply_async(cls._calc_factor_loading_proc,
                                  args=(
                                      stock_info.symbol,
                                      calc_date,
                                      q,
                                  ))
                p.close()
                p.join()
                while not q.empty():
                    smart_q = q.get(True)
                    dict_factor['id'].append(smart_q[0])
                    dict_factor['factorvalue'].append(smart_q[1])

            date_label = Utils.get_trading_days(calc_date, ndays=2)[1]
            dict_factor['date'] = [date_label] * len(dict_factor['id'])
            # 4.计算去极值标准化后的因子载荷
            df_std_factor = Utils.normalize_data(pd.DataFrame(dict_factor),
                                                 columns='factorvalue',
                                                 treat_outlier=True,
                                                 weight='eq')
            # 5.保存因子载荷至因子数据库
            if save:
                # Utils.factor_loading_persistent(cls._db_file, calc_date.strftime('%Y%m%d'), dict_factor)
                cls._save_factor_loading(cls._db_file,
                                         Utils.datetimelike_to_str(calc_date,
                                                                   dash=False),
                                         dict_factor,
                                         'SmartMoney',
                                         factor_type='raw',
                                         columns=['date', 'id', 'factorvalue'])
                cls._save_factor_loading(cls._db_file,
                                         Utils.datetimelike_to_str(calc_date,
                                                                   dash=False),
                                         df_std_factor,
                                         'SmartMoney',
                                         factor_type='standardized',
                                         columns=['date', 'id', 'factorvalue'])
            # 休息360秒
            logging.info('Suspending for 360s.')
            time.sleep(360)
        return dict_factor
Пример #30
0
    def calc_factor_loading(cls,
                            start_date,
                            end_date=None,
                            month_end=True,
                            save=False,
                            **kwargs):
        """
        计算指定日期的样本个股的因子载荷,并保存至因子数据库
        Parameters
        --------
        :param start_date: datetime-like, str
            开始日期
        :param end_date: datetime-like, str,默认None
            结束日期,如果为None,则只计算start_date日期的因子载荷
        :param month_end: bool,默认True
            只计算月末时点的因子载荷,该参数只在end_date不为None时有效,并且不论end_date是否为None,都会计算第一天的因子载荷
        :param save: 是否保存至因子数据库,默认为False
        :return: 因子载荷,DataFrame
        --------
            因子载荷,DataFrame
            0: id, 证券ID
            1: factorvalue, 因子载荷
            如果end_date=None,返回start_date对应的因子载荷数据
            如果end_date!=None,返回最后一天的对应的因子载荷数据
            如果没有计算数据,返回None
        """
        # 1.取得交易日序列及股票基本信息表
        start_date = Utils.to_date(start_date)
        if end_date is not None:
            end_date = Utils.to_date(end_date)
            trading_days_series = Utils.get_trading_days(start=start_date,
                                                         end=end_date)
        else:
            trading_days_series = Utils.get_trading_days(end=start_date,
                                                         ndays=1)
        all_stock_basics = CDataHandler.DataApi.get_secu_basics()
        # 2.遍历交易日序列,计算APM因子载荷
        dict_apm = None
        for calc_date in trading_days_series:
            dict_apm = {'date': [], 'id': [], 'factorvalue': []}
            if month_end and (not Utils.is_month_end(calc_date)):
                continue
            # 2.1.遍历个股,计算个股APM.stat统计量,过去20日收益率,分别放进stat_lst,ret20_lst列表中
            s = (calc_date - datetime.timedelta(days=90)).strftime('%Y%m%d')
            stock_basics = all_stock_basics[all_stock_basics.list_date < s]
            stat_lst = []
            ret20_lst = []
            symbol_lst = []

            # 采用单进程计算
            # for _, stock_info in stock_basics.iterrows():
            #     stat_i = cls._calc_factor_loading(stock_info.symbol, calc_date)
            #     ret20_i = Utils.calc_interval_ret(stock_info.symbol, end=calc_date, ndays=20)
            #     if stat_i is not None and ret20_i is not None:
            #         stat_lst.append(stat_i)
            #         ret20_lst.append(ret20_i)
            #         symbol_lst.append(Utils.code_to_symbol(stock_info.symbol))
            #         logging.info('APM of %s = %f' % (stock_info.symbol, stat_i))

            # 采用多进程并行计算
            q = Manager().Queue()
            p = Pool(4)  # 最多同时开启4个进程
            for _, stock_info in stock_basics.iterrows():
                p.apply_async(cls._calc_factor_loading_proc,
                              args=(
                                  stock_info.symbol,
                                  calc_date,
                                  q,
                              ))
            p.close()
            p.join()
            while not q.empty():
                apm_value = q.get(True)
                symbol_lst.append(apm_value[0])
                stat_lst.append(apm_value[1])
                ret20_lst.append(apm_value[2])

            assert len(stat_lst) == len(ret20_lst)
            assert len(stat_lst) == len(symbol_lst)

            # 2.2.构建APM因子
            # 2.2.1.将统计量stat对动量因子ret20j进行截面回归:stat_j = \beta * Ret20_j + \epsilon_j
            #     残差向量即为对应个股的APM因子
            # 截面回归之前,先对stat统计量和动量因子进行去极值和标准化处理
            stat_arr = np.array(stat_lst).reshape((len(stat_lst), 1))
            ret20_arr = np.array(ret20_lst).reshape((len(ret20_lst), 1))
            stat_arr = Utils.clean_extreme_value(stat_arr)
            stat_arr = Utils.normalize_data(stat_arr)
            ret20_arr = Utils.clean_extreme_value(ret20_arr)
            ret20_arr = Utils.normalize_data(ret20_arr)
            # 回归分析
            # ret20_arr = sm.add_constant(ret20_arr)
            apm_model = sm.OLS(stat_arr, ret20_arr)
            apm_result = apm_model.fit()
            apm_lst = list(np.around(apm_result.resid, 6))  # apm因子载荷精确到6位小数
            assert len(apm_lst) == len(symbol_lst)
            # 2.2.2.构造APM因子字典,并持久化
            date_label = Utils.get_trading_days(calc_date, ndays=2)[1]
            dict_apm = {
                'date': [date_label] * len(symbol_lst),
                'id': symbol_lst,
                'factorvalue': apm_lst
            }
            if save:
                Utils.factor_loading_persistent(cls._db_file,
                                                calc_date.strftime('%Y%m%d'),
                                                dict_apm)

            # 2.3.构建PureAPM因子
            # 将stat_arr转换为DataFrame, 此时的stat_arr已经经过了去极值和标准化处理
            df_stat = DataFrame(stat_arr, index=symbol_lst, columns=['stat'])
            # 取得提纯的因变量因子
            df_dependent_factor = cls.get_dependent_factors(calc_date)
            # 将df_stat和因变量因子拼接
            df_data = pd.concat([df_stat, df_dependent_factor],
                                axis=1,
                                join='inner')
            # OLS回归,提纯APM因子
            arr_data = np.array(df_data)
            pure_apm_model = sm.OLS(arr_data[:, 0], arr_data[:, 1:])
            pure_apm_result = pure_apm_model.fit()
            pure_apm_lst = list(np.around(pure_apm_result.resid, 6))
            pure_symbol_lst = list(df_data.index)
            assert len(pure_apm_lst) == len(pure_symbol_lst)
            # 构造pure_apm因子字典,并持久化
            dict_pure_apm = {
                'date': [date_label] * len(pure_symbol_lst),
                'id': pure_symbol_lst,
                'factorvalue': pure_apm_lst
            }
            pure_apm_db_file = os.path.join(factor_ct.FACTOR_DB.db_path,
                                            factor_ct.APM_CT.pure_apm_db_file)
            if save:
                Utils.factor_loading_persistent(pure_apm_db_file,
                                                calc_date.strftime('%Y%m%d'),
                                                dict_pure_apm)
            # 休息360秒
            logging.info('Suspended for 360s.')
            time.sleep(360)
        return dict_apm
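
A small sketch of the two residualisation steps above on toy data: the APM loading is the residual of stat regressed on the 20-day return (no intercept, as in the code), and the "pure" APM loading is the residual of stat regressed on the dependent-factor matrix. Inputs here are random stand-ins.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(3)
ret20 = rng.normal(size=400)
stat = 0.3 * ret20 + rng.normal(scale=0.8, size=400)

apm = sm.OLS(stat, ret20.reshape(-1, 1)).fit().resid        # momentum-stripped stat
dependent = rng.normal(size=(400, 6))                       # stand-in for industry + style exposures
pure_apm = sm.OLS(stat, dependent).fit().resid              # factor-purified stat
print(np.around(apm[:3], 6), np.around(pure_apm[:3], 6))
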