Example #1
 def nav_series(self,
                start_date,
                end_date,
                freq='1d',
                start_point=1,
                benchmark=None,
                divider=1):
     dates = data_source.trade_calendar.get_trade_days(start_date,
                                                       end_date,
                                                       freq,
                                                       retstr=None)
     nav = self.uqer_db.run_api("FundNavGet",
                                ticker=self.fund_id,
                                beginDate=start_date,
                                endDate=end_date,
                                field=['endDate', 'ADJUST_NAV'])
     nav.rename(columns={
         'endDate': 'date',
         'ADJUST_NAV': 'NAV'
     },
                inplace=True)
     nav['date'] = pd.to_datetime(nav['date'])
     nav.set_index('date', inplace=True)
     ret = nav['NAV'].pct_change() / divider
     if benchmark is not None:
         benchmark_ret = data_source.load_factor(
             'daily_returns_%',
             '/indexprices/',
             ids=[benchmark],
             dates=ret.index.tolist()).reset_index('IDs', drop=True) / 100
         ret = ret - benchmark_ret['daily_returns_%']
     nav = (1.0 + ret).cumprod().fillna(1.0)
     nav = nav.reindex(dates, method='ffill') / nav.iloc[0] * start_point
     nav.name = 'nav'
     return nav
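
A minimal usage sketch, assuming `fund` is an instance of the class that defines nav_series, with fund_id and uqer_db already configured (the dates and the benchmark index code are placeholders):

    # hypothetical call; '000300' stands for a benchmark index code
    nav = fund.nav_series('20180101', '20181231',
                          freq='1d',
                          start_point=1.0,
                          benchmark='000300')
    print(nav.tail())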
Example #2
    def _create_signal_and_stockpool(self, signal, stock_pool, dates):
        from collections.abc import Iterable
        if isinstance(signal, dict):
            signal = data_source.load_factor(signal['factor_name'],
                                             signal['factor_dir'],
                                             dates=dates).iloc[:, 0]
        elif dates is not None:
            if isinstance(dates, Iterable):
                dates = list(dates)
            signal = signal.loc[dates]
        else:
            raise KeyError("Incorrect Parameters!")
        signal.dropna(inplace=True)

        if isinstance(stock_pool, pd.Series):
            stock_pool_valid = typical(stock_pool.to_frame())
        elif isinstance(stock_pool, str):
            stock_pool = data_source.sector.get_index_members(ids=stock_pool,
                                                              dates=dates)
            stock_pool_valid = typical(stock_pool)
        else:
            stock_pool_valid = typical(stock_pool)
        stock_pool_valid = _intersection(signal, stock_pool_valid)
        estu = self.ds.load_factors(['Estu'], dates=dates)
        stock_pool_valid = _intersection(estu[estu['Estu'] == 1],
                                         stock_pool_valid)
        return signal, stock_pool_valid.reset_index(level=1)['IDs']
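
A sketch of the two accepted `signal` forms (assuming `strategy` is an instance of the enclosing class; the factor name, directory, index code and dates are placeholders):

    # 1) signal given as a factor name/directory, loaded through data_source
    signal, pool = strategy._create_signal_and_stockpool(
        {'factor_name': 'bp_divide_median', 'factor_dir': '/stock_value/'},
        stock_pool='000905',               # index code resolved via get_index_members
        dates=['20180601', '20180602'])

    # 2) signal passed directly as a pd.Series indexed by [date, IDs]
    signal, pool = strategy._create_signal_and_stockpool(my_signal, my_pool_frame, dates)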
Example #3
def cal_factor_group_return(factor_data, periods=(20,), prices=None, group_by=None,
                            quantiles=5, freq='1d', **kwargs):
    """基于alphalens计算因子收益率"""
    stocklist = pd.DataFrame(np.ones(len(factor_data)), index=factor_data.index, columns=['stocklist'])
    stocklist = stockFilter.typical(stocklist)
    factor_data = factor_data.reindex(stocklist.index)

    start = factor_data.index.get_level_values('date').min()
    start = tc.tradeDayOffset(start, -5)
    end = factor_data.index.get_level_values('date').max()
    end = tc.tradeDayOffset(end, max(periods)+1, freq=freq)
    if prices is None:
        prices = data_source.load_factor('adj_close', '/stocks/', start_date=start,
                                         end_date=end)['adj_close'].unstack()
    elif isinstance(prices, pd.DataFrame):
        if prices.index.nlevels == 2:
            prices = prices.iloc[:, 0].unstack()
    else:
        raise ValueError('Invalid prices format!')
    if freq != '1d':
        date_index = tc.get_trade_days(start, end, freq, retstr=None)
        prices = prices.reindex(date_index, copy=False)
    if_groupby = group_by is not None
    merge_data = get_clean_factor_and_forward_returns(factor_data, prices, group_by,
                                                      quantiles=quantiles, periods=periods,
                                                      binning_by_group=if_groupby, **kwargs)
    return merge_data
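
A usage sketch (the factor name and directory are illustrative; the factor index is expected to follow the [date, asset] convention that alphalens requires):

    factor = data_source.load_factor('bp_divide_median', '/stock_value/',
                                     start_date='20180101', end_date='20181231').iloc[:, 0]
    merged = cal_factor_group_return(factor, periods=(1, 5, 20))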
Example #4
def cal_ic_by_alphalens(factor_data, prices=None, group_by=None, periods=(20,), **kwargs):
    """调用alphalens计算因子IC
    """
    factor_data = factor_data.copy()
    if isinstance(factor_data, pd.DataFrame):
        factor_data = factor_data.iloc[:, 0]
    factor_data.index.names = ['date', 'asset']

    if prices is None:
        start = factor_data.index.get_level_values('date').min()
        start = tc.tradeDayOffset(start, -5)
        end = factor_data.index.get_level_values('date').max()
        end = tc.tradeDayOffset(end, max(periods))
        prices = data_source.load_factor('adj_close', '/stocks/', start_date=start,
                                         end_date=end)['adj_close'].unstack()
    elif isinstance(prices, pd.DataFrame):
        if prices.index.nlevels == 2:
            prices = prices.iloc[:, 0].unstack()
    else:
        raise ValueError('Invalid prices format!')
    
    merge_data = get_clean_factor_and_forward_returns(factor_data, prices,
                                                      group_by, periods=periods, **kwargs)
    by_group = group_by is not None
    ic = factor_information_coefficient(merge_data, group_adjust=False, by_group=by_group)
    return ic
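
A usage sketch (factor name and directory are illustrative):

    factor = data_source.load_factor('bp_divide_median', '/stock_value/',
                                     start_date='20180101', end_date='20181231')
    ic = cal_ic_by_alphalens(factor, periods=(1, 5, 20))
    print(ic.mean())    # mean IC per holding period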
Example #5
def get_risky_stocks(start, end, **kwargs):
    """Flag risky stocks (sub-score plus total score >= 1) and save the flags and score factors to the h5 database."""
    dates = data_source.trade_calendar.get_trade_days(start, end, retstr=None)
    subscore = data_source.load_factor('sub_score_of_risky_stocks', '/indexes/').iloc[:, 0].unstack().\
        reindex(dates, method='nearest')
    totalscore = data_source.load_factor('total_score_of_risky_stocks', '/indexes/').iloc[:, 0].unstack().\
        reindex(dates, method='nearest')
    totalscorev2 = data_source.load_factor('total_score_of_risky_stocks_v2', '/indexes/').iloc[:, 0].unstack().\
        reindex(dates, method='nearest')
    subscore, totalscore = subscore.align(totalscore, fill_value=0)
    risky_stocks = ((subscore + totalscore) >=
                    1).stack().to_frame('risky_stocks').astype('int')
    risky_stocks.index.names = ['date', 'IDs']
    kwargs['data_source'].h5DB.save_factor(risky_stocks, '/indexes/')
    kwargs['data_source'].h5DB.save_factor(
        subscore.stack().to_frame('risky_stocks_subscore').rename_axis(
            ['date', 'IDs']), '/indexes/')
    kwargs['data_source'].h5DB.save_factor(
        totalscorev2.stack().to_frame('risky_stocks_totalscore').rename_axis(
            ['date', 'IDs']), '/indexes/')
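
The function expects the data source to be passed through kwargs; a minimal call might look like this (dates are placeholders):

    get_risky_stocks('20180101', '20181231', data_source=data_source)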
Example #6
File: universe.py  Project: rlcjj/Packages
def u_100001(start, end, **kwargs):
    """
    1. 连续两年扣非净利润不少于五千万
    2. 上市时间超过两年
    """
    datasource = kwargs['data_source']
    dates = datasource.trade_calendar.get_trade_days(start, end, '1d')
    list_days = get_go_market_days(dates)
    np_ly0 = data_source.load_factor('net_profit_deduct_nonprofit',
                                     '/stock_profit/',
                                     dates=dates)
    np_ly1 = data_source.load_factor('net_profit_deduct_nonprofit_ly1',
                                     '/stock_profit/',
                                     dates=dates)
    data = list_days.join([np_ly0, np_ly1])
    new = data.eval(
        "list_days>0 & net_profit_deduct_nonprofit>50000000 & net_profit_deduct_nonprofit_ly1>50000000"
    )
    datasource.h5DB.save_factor(
        new.to_frame('_100001').astype('int'), '/indexes/')
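
Universe functions of this form take a date range plus a data_source keyword; a sketch of rebuilding this universe (dates are placeholders):

    u_100001('20150101', '20181231', data_source=data_source)
    # the result is saved as the '_100001' factor under /indexes/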
Example #7
 def get_return(self, start_date=None, end_date=None, dates=None):
     """每个板块市值加权的日收益率"""
     dummy = self.get(start_date, end_date, dates)
     mv = data_source.load_factor('float_mkt_value',
                                  '/stocks/',
                                  start_date=start_date,
                                  end_date=end_date,
                                  dates=dates)**0.5
     mv = mv.reindex(dummy.index, copy=False)
     r = data_source.load_factor('daily_returns',
                                 '/stocks/',
                                 start_date=start_date,
                                 end_date=end_date,
                                 dates=dates)
     r = r.reindex(dummy.index, copy=False)
     r_mv = r.values * mv.values
     dummy_ret = pd.DataFrame(r_mv * dummy.values,
                              index=dummy.index,
                              columns=dummy.columns).groupby('date').sum()
     dummy_weight = pd.DataFrame(
         mv.values * dummy.values, index=dummy.index,
         columns=dummy.columns).groupby('date').sum()
     return dummy_ret / dummy_weight
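
A sketch, assuming `sector` is an instance of the enclosing sector-dummy class (dates are placeholders):

    sector_ret = sector.get_return(start_date='20180101', end_date='20181231')
    print(sector_ret.head())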
Example #8
def NeutralizeBySizeIndu(factor_data,
                         factor_name,
                         std_qt=True,
                         indu_name='中信一级',
                         drop_first_indu=True,
                         **kwargs):
    """ 为因子进行市值和行业中性化

    市值采用对数市值(百万为单位);

    Paramters:
    ==========
    factor_data: pd.DataFrame(index:[date, IDs])
        因子数值
    factor_name: str
        因子名称
    std_qt: bool
        在中性化之前是否进行分位数标准化,默认为True
    indu_name: strs
        行业选取
    """
    from FactorLib.data_source.base_data_source_h5 import data_source
    dates = factor_data.index.get_level_values(0).unique().tolist()
    ids = factor_data.index.get_level_values(1).unique().tolist()
    lncap = np.log(data_source.load_factor('float_mkt_value', '/stocks/', dates=dates, ids=ids) / 10000.0). \
        rename(columns={'float_mkt_value': 'lncap'}).reindex(factor_data.index)
    indu_flag = data_source.sector.get_industry_dummy(
        None,
        industry=indu_name,
        dates=dates,
        idx=factor_data.dropna(),
        drop_first=drop_first_indu)
    indu_flag = indu_flag[(indu_flag == 1).any(axis=1)]

    if std_qt:
        factor_data = StandardByQT(factor_data, factor_name).to_frame()
        lncap = StandardByQT(lncap, 'lncap').to_frame()
    industry_names = list(indu_flag.columns)
    indep_data = lncap.join(indu_flag, how='inner')
    resid = Orthogonalize(factor_data, indep_data, factor_name,
                          industry_names + ['lncap'], **kwargs)
    return resid.reindex(factor_data.index)
Example #9
 def load_stock_return(self, date):
     return data_source.load_factor(
         'daily_returns_%', '/stocks/', dates=[date]) / 100
Example #10
    def _add_userlimit(self, user_conf, **kwargs):
        """添加用户自定义的因子限制
        user_conf: dict
        自定义限制条件 :
            factor_data : pd.DataFrame
            自定义风险因子的数据, 每一列是一个因子,[date, IDs]为索引

            factor_name : str
            若factor_data为None, factor_name和factor_dir必须非空, h5db
            会从中提取数据

            factor_dir : str
            若factor_data为None, factor_name和factor_dir必须非空, h5db
            会从中提取数据

            limit : float or list of floats or dict
            每个风险因子的限制值, 若limit是列表,其长度必须与因子个数相同
            若limit是字典型, key值为factor_data中的列名, value是列表或者
            scalar

            standard : bool
            在加入到优化器之前是否对输入的因子进行QT标准化

        kwargs: dict
            active : bool
            限制条件是否是相对行业的限制,默认为True

            sense : str
            限制类型: 'E': equal / 'G': greater than / 'L': lower than
        """
        if user_conf.get('factor_data', pd.DataFrame()).empty:
            factor_name = user_conf.get('factor_name')
            factor_dir = user_conf.get('factor_dir')
            factor_data = data_source.load_factor(factor_name, factor_dir, dates=[self._date])
        else:
            factor_data = user_conf.get('factor_data')
            factor_name = factor_data.columns
        limit = user_conf.get('limit')
        is_standard = user_conf.get('standard', False)
        is_active = kwargs.get('active', False)
        sense = kwargs.get('sense', 'E')
        limit_type = kwargs.get('limit_type', 'value')

        if isinstance(factor_data, pd.Series):
            factor_data = factor_data.to_frame(factor_name)
            factor_name = [factor_name]

        limit_min = {}
        limit_max = {}
        limit_sense = {}
        if isinstance(limit, dict):
            for k, v in limit.items():
                if isinstance(v, list):
                    assert v[0] <= v[1]
                    limit_min[k] = v[0]
                    limit_max[k] = v[1]
                elif isinstance(v, (int, float)):
                    limit_sense[k] = float(v)
                else:
                    raise ValueError("自定义因子敞口限定值不合法!")
        if isinstance(limit, (int, float)):
            limit_sense = {x: limit for x in factor_name}
        if isinstance(limit, list):
            if len(limit) != len(factor_name):
                raise ValueError("limit dimension dose not match factor dimension")
            limit_sense = {x: y for x, y in zip(factor_name, limit)}

        if isinstance(sense, str):
            sense = [sense] * len(limit_sense)
        else:
            if len(sense) != len(limit_sense):
                raise ValueError("sense dimension dose not match factor dimension")

        for f, l in limit_min.items():
            if is_standard:
                factor_data2 = StandardByQT(factor_data, f).loc[self._date].reindex(self._allids, fill_value=0.0)
            else:
                factor_data2 = factor_data.loc[self._date, f].reindex(self._allids, fill_value=0.0)
            if is_active:
                if limit_type == 'std':
                    l *= self._prepare_benchmark_userexpo_std(factor_data2)
                l += self._prepare_benchmark_userexpo(factor_data2)
            portfolio_factor = factor_data2.loc[self._allids]
            if np.any(np.isnan(portfolio_factor.values)):
                raise ValueError("自定义因子因子数据存在缺失值!")
            lin_expr = []
            rhs = [l]
            name = [get_available_names(x, self.names_used) for x in ['user_%s' % f]]
            lin_expr.append([portfolio_factor.index.tolist(), portfolio_factor.values.tolist()])
            # lower-bound constraint uses the 'G' sense literal; the kwargs-derived
            # `sense` list is reserved for the scalar limits handled below
            self._c.linear_constraints.add(lin_expr=lin_expr, senses=['G'], rhs=rhs, names=name)
            self.names_used += name

        for f, l in limit_max.items():
            if is_standard:
                factor_data2 = StandardByQT(factor_data, f).loc[self._date].reindex(self._allids, fill_value=0.0)
            else:
                factor_data2 = factor_data.loc[self._date, f].reindex(self._allids, fill_value=0.0)
            if is_active:
                if limit_type == 'std':
                    l *= self._prepare_benchmark_userexpo_std(factor_data2)
                l += self._prepare_benchmark_userexpo(factor_data2)
            portfolio_factor = factor_data2.loc[self._allids]
            if np.any(np.isnan(portfolio_factor.values)):
                raise ValueError("自定义因子因子数据存在缺失值!")
            lin_expr = []
            rhs = [l]
            name = [get_available_names(x, self.names_used) for x in ['user_%s' % f]]
            lin_expr.append([portfolio_factor.index.tolist(), portfolio_factor.values.tolist()])
            # upper-bound constraint uses the 'L' sense literal; the kwargs-derived
            # `sense` list is reserved for the scalar limits handled below
            self._c.linear_constraints.add(lin_expr=lin_expr, senses=['L'], rhs=rhs, names=name)
            self.names_used += name

        for f, s in zip(limit_sense, sense):
            l = limit_sense[f]
            if is_standard:
                factor_data2 = StandardByQT(factor_data, f).loc[self._date].reindex(self._allids, fill_value=0.0)
            else:
                factor_data2 = factor_data.loc[self._date, f].reindex(self._allids, fill_value=0.0)
            if is_active:
                if limit_type == 'std':
                    l *= self._prepare_benchmark_userexpo_std(factor_data2)
                l += self._prepare_benchmark_userexpo(factor_data2)
            portfolio_factor = factor_data2.loc[self._allids]
            if np.any(np.isnan(portfolio_factor.values)):
                raise ValueError("自定义因子因子数据存在缺失值!")
            lin_expr = []
            sense = [s]
            rhs = [l]
            name = [get_available_names(x, self.names_used) for x in ['user_%s' % f]]
            lin_expr.append([portfolio_factor.index.tolist(), portfolio_factor.values.tolist()])
            self._c.linear_constraints.add(lin_expr=lin_expr, senses=sense, rhs=rhs, names=name)
            self.names_used += name
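
A sketch of the two ways limits can be specified (assuming `opt` is the optimizer instance that owns this method; the factor names, limits and columns below are placeholders):

    # scalar limit on a single custom factor, applied as an active constraint
    opt._add_userlimit({'factor_name': 'ths_click_ratio',
                        'factor_dir': '/stock_alternative/',
                        'limit': {'ths_click_ratio': 0.1},
                        'standard': True},
                       active=True, sense='L')

    # per-factor [min, max] ranges, with the data supplied directly
    opt._add_userlimit({'factor_data': my_factors,    # pd.DataFrame indexed by [date, IDs]
                        'limit': {'size': [-0.2, 0.2], 'beta': [-0.1, 0.1]}})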
Example #11
    style = risk_ds.load_style_factor(
        factor_names=risk_factor_names,
        ids=factor_data.index.get_level_values('IDs').unique().tolist(),
        dates=factor_data.index.get_level_values('date').unique().tolist())
    if std_indep:
        style = style.apply(lambda x: StandardByQT(x.to_frame(), x.name))
    drop_first_indu = kwargs.get('drop_first_indu', True)
    indu = data_source.sector.get_industry_dummy(idx=factor_data,
                                                 industry=indu_name,
                                                 drop_first=drop_first_indu)
    # indu = indu[(indu==1).any(axis=1)]
    X = style.join(indu, how='inner')
    add_const = kwargs.get('add_const', True)
    resid = Orthogonalize(factor_data,
                          X,
                          left_name=factor_name,
                          right_name=X.columns.tolist(),
                          add_const=add_const)
    return resid


if __name__ == '__main__':
    from FactorLib.data_source.base_data_source_h5 import data_source
    factor_data = data_source.load_factor('ths_click_ratio',
                                          '/stock_alternative/',
                                          start_date='20120409',
                                          end_date='20170531')
    # profile
    r = NeutralizeBySizeIndu(factor_data, 'ths_click_ratio')
    print(r)
    # data_source.h5DB.save_factor(r, '/stock_alternative/')
Example #12
    stocklist = pd.DataFrame(np.ones(len(factor_data)), index=factor_data.index, columns=['stocklist'])
    stocklist = stockFilter.typical(stocklist)
    factor_data = factor_data.reindex(stocklist.index)

    start = factor_data.index.get_level_values('date').min()
    start = tc.tradeDayOffset(start, -5)
    end = factor_data.index.get_level_values('date').max()
    end = tc.tradeDayOffset(end, max(periods)+1, freq=freq)
    if prices is None:
        prices = data_source.load_factor('adj_close', '/stocks/', start_date=start,
                                         end_date=end)['adj_close'].unstack()
    elif isinstance(prices, pd.DataFrame):
        if prices.index.nlevels == 2:
            prices = prices.iloc[:, 0].unstack()
    else:
        raise ValueError('Invalid prices format!')
    if freq != '1d':
        date_index = tc.get_trade_days(start, end, freq, retstr=None)
        prices = prices.reindex(date_index, copy=False)
    if_groupby = group_by is not None
    merge_data = get_clean_factor_and_forward_returns(factor_data, prices, group_by,
                                                      periods=periods, binning_by_group=if_groupby,
                                                      **kwargs)
    return merge_data


if __name__ == '__main__':
    bp_div_median = data_source.load_factor('bp_divide_median', '/stock_value/')
    ic = cal_ic(bp_div_median, factor_name='bp_divide_median', rank=True, stock_validation='typical')