Example #1
File: bayesian.py Project: no7dw/pyfolio
def _plot_bayes_cone(returns_train, returns_test,
                     preds, plot_train_len=None, ax=None):
    if ax is None:
        ax = plt.gca()

    returns_train_cum = cum_returns(returns_train, starting_value=1.)
    returns_test_cum = cum_returns(returns_test,
                                   starting_value=returns_train_cum.iloc[-1])

    perc = compute_bayes_cone(preds, starting_value=returns_train_cum.iloc[-1])
    # Add indices
    perc = {k: pd.Series(v, index=returns_test.index) for k, v in perc.items()}

    returns_test_cum_rel = returns_test_cum
    # Stitch together train and test
    returns_train_cum.loc[returns_test_cum_rel.index[0]] = \
        returns_test_cum_rel.iloc[0]

    # Plotting
    if plot_train_len is not None:
        returns_train_cum = returns_train_cum.iloc[-plot_train_len:]

    returns_train_cum.plot(ax=ax, color='g', label='In-sample')
    returns_test_cum_rel.plot(ax=ax, color='r', label='Out-of-sample')

    ax.fill_between(returns_test.index, perc[5], perc[95], alpha=.3)
    ax.fill_between(returns_test.index, perc[25], perc[75], alpha=.6)
    ax.legend(loc='best', frameon=True, framealpha=0.5)
    ax.set_title('Bayesian cone')
    ax.set_xlabel('')
    ax.set_ylabel('Cumulative returns')

    return ax
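A minimal usage sketch with synthetic data, assuming _plot_bayes_cone above is in scope together with the module imports it relies on (pandas as pd, matplotlib.pyplot as plt, and pyfolio's cum_returns and compute_bayes_cone); the seed, dates, and distribution parameters are arbitrary:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

np.random.seed(42)
idx = pd.bdate_range('2017-01-02', periods=300)
rets = pd.Series(np.random.normal(0.0005, 0.01, len(idx)), index=idx)
returns_train, returns_test = rets.iloc[:250], rets.iloc[250:]
# each row of preds is one simulated path of daily (noncumulative) returns
# over the out-of-sample window
preds = np.random.normal(0.0005, 0.01, size=(1000, len(returns_test)))

ax = _plot_bayes_cone(returns_train, returns_test, preds, plot_train_len=100)
plt.show()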
Example #2
    def calculate_metrics(self):
        self.benchmark_period_returns = \
            cum_returns(self.benchmark_returns).iloc[-1]

        self.algorithm_period_returns = \
            cum_returns(self.algorithm_returns).iloc[-1]

        if not self.algorithm_returns.index.equals(
                self.benchmark_returns.index):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
            algorithm_returns ({algo_count}) in range {start} : {end}"

            message = message.format(bm_count=len(self.benchmark_returns),
                                     algo_count=len(self.algorithm_returns),
                                     start=self._start_session,
                                     end=self._end_session)
            raise Exception(message)

        self.num_trading_days = len(self.benchmark_returns)

        self.mean_algorithm_returns = (
            self.algorithm_returns.cumsum() /
            np.arange(1, self.num_trading_days + 1, dtype=np.float64))

        self.benchmark_volatility = annual_volatility(self.benchmark_returns)
        self.algorithm_volatility = annual_volatility(self.algorithm_returns)

        self.treasury_period_return = choose_treasury(
            self.treasury_curves,
            self._start_session,
            self._end_session,
            self.trading_calendar,
        )
        self.sharpe = sharpe_ratio(self.algorithm_returns)
        # The consumer currently expects a 0.0 value for sharpe in period,
        # this differs from cumulative which was np.nan.
        # When factoring out the sharpe_ratio, the different return types
        # were collapsed into `np.nan`.
        # TODO: Either fix consumer to accept `np.nan` or make the
        # `sharpe_ratio` return type configurable.
        # In the meantime, convert nan values to 0.0
        if pd.isnull(self.sharpe):
            self.sharpe = 0.0
        self.downside_risk = downside_risk(self.algorithm_returns.values)
        self.sortino = sortino_ratio(
            self.algorithm_returns.values,
            _downside_risk=self.downside_risk,
        )
        self.information = information_ratio(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.alpha, self.beta = alpha_beta_aligned(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.excess_return = self.algorithm_period_returns - \
            self.treasury_period_return
        self.max_drawdown = max_drawdown(self.algorithm_returns.values)
        self.max_leverage = self.calculate_max_leverage()
Example #3
def _cumulative_returns_less_costs(returns, costs):
    """
    Compute cumulative returns, less costs.
    """
    if costs is None:
        return ep.cum_returns(returns)
    return ep.cum_returns(returns - costs)
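A short usage sketch, assuming the helper above is in scope with `import empyrical as ep`; the numbers are illustrative:

import pandas as pd

idx = pd.bdate_range('2017-01-02', periods=5)
returns = pd.Series([0.01, -0.02, 0.005, 0.0, 0.01], index=idx)
costs = pd.Series(0.001, index=idx)  # e.g. 10 bps of daily trading costs

print(_cumulative_returns_less_costs(returns, None).iloc[-1])   # gross
print(_cumulative_returns_less_costs(returns, costs).iloc[-1])  # net of costs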
Example #4
def plot_alpha_curve(return_dict):
    """
    :param return_dict: dict, returnName: [returnData, ReturnType]
    :return:
    """

    strat_return = return_dict['stratReturn'][0]
    strat_return_type = return_dict['stratReturn'][1]
    benchmark_return = return_dict['benchmarkReturn'][0]
    benchmark_return_type = return_dict['benchmarkReturn'][1]
    ptf_return = return_dict['ptfReturn'][0]
    ptf_return_type = return_dict['ptfReturn'][1]

    strat_return = cum_returns(strat_return,
                               starting_value=1.0) if strat_return_type == ReturnType.NonCumul else strat_return
    benchmark_return = cum_returns(benchmark_return,
                                   starting_value=1.0) if benchmark_return_type == ReturnType.NonCumul \
        else benchmark_return
    ptf_return = cum_returns(ptf_return, starting_value=1.0) if ptf_return_type == ReturnType.NonCumul else ptf_return

    data = pd.concat([ptf_return, strat_return, benchmark_return], join_axes=[strat_return.index], axis=1)
    # If the starting data point is missing, set it to 1.0 (the initial NAV)
    data = data.fillna(1.0)
    data.columns = [u'策略对冲收益', u'策略未对冲收益', u'指数收益']
    ax = data.plot(figsize=(16, 6), title=u'策略收益演示图')
    fig_style(ax, [u'策略对冲净值', u'策略未对冲净值', u'指数收益'], x_label=u'交易日', y_label=u'净值',
              legend_loc='upper left')
    plt.show()
Example #5
def plot_rolling_returns_multiple(returns_arr, factor_returns=None, logy=False, ax=None, names_arr=None, extra_bm=0):
    """
    Plots cumulative rolling returns versus some benchmarks'.

    This is based on https://github.com/quantopian/pyfolio/blob/master/pyfolio/plotting.py,
    but modified to plot multiple rolling returns on the same graph

    Arguments
    ----------
    returns_arr : array of pd.Series. Each element contains daily returns of the strategy, noncumulative.
    factor_returns : pd.Series, optional
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - This is in the same style as returns.
    logy : bool, optional
        Whether to log-scale the y-axis.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    names_arr: array of names for the plots, optional
    extra_bm: number of extra benchmarks. These are assumed to be at the front of returns_arr and will be plotted differently

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    ax.set_xlabel('')
    ax.set_ylabel('Cumulative returns')
    ax.set_yscale('log' if logy else 'linear')

    for i in range(len(returns_arr)):
        # pData = perfData[i]
        # returns, positions, transactions = pf.utils.extract_rets_pos_txn_from_zipline(pData)

        returns = returns_arr[i]
        returns.name = 'Portfolio %i' % i if names_arr is None else names_arr[i]

        cum_rets = ep.cum_returns(returns, 1.0)
        is_cum_returns = cum_rets
        if (i == 0 and factor_returns is not None):
            cum_factor_returns = ep.cum_returns(factor_returns[cum_rets.index], 1.0)
            cum_factor_returns.plot(lw=1, color='gray', label=factor_returns.name, alpha=0.60, ax=ax, style=['-.'])

        is_cum_returns.plot(lw=1, alpha=0.6, label=returns.name, ax=ax, style=['-.'] if (i < extra_bm) else None)
        # is_cum_returns.plot(lw=1, alpha=0.6, label=returns.name, ax=ax)

    years = mdates.YearLocator()   # every year
    months = mdates.MonthLocator()  # every month
    major_fmt = mdates.DateFormatter('%b %Y')

    ax.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
    ax.xaxis.set_major_locator(years)
    ax.xaxis.set_major_formatter(major_fmt)
    ax.xaxis.set_minor_locator(months)

    return ax
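A usage sketch with three synthetic return series, assuming the function above is in scope along with its module imports (matplotlib.pyplot as plt, matplotlib.dates as mdates, matplotlib.ticker as mtick, empyrical as ep):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

np.random.seed(7)
idx = pd.bdate_range('2018-01-01', periods=252)
returns_arr = [pd.Series(np.random.normal(0.0004, 0.01, len(idx)), index=idx)
               for _ in range(3)]

# treat the first series as a benchmark so it is drawn with a dash-dot style
ax = plot_rolling_returns_multiple(returns_arr,
                                   names_arr=['Benchmark', 'Strategy A',
                                              'Strategy B'],
                                   extra_bm=1)
plt.show()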
Example #6
    def rolling_drawdown(self, returns):
        out = np.empty(returns.shape[1:])

        returns_1d = returns.ndim == 1

        if len(returns) < 1:
            out[()] = np.nan

            if returns_1d:
                out = out.item()

            return out

        returns_array = np.asanyarray(returns)

        cumulative = np.empty((returns.shape[0] + 1, ) + returns.shape[1:],
                              dtype='float64')

        cumulative[0] = start = 100

        empyrical.cum_returns(returns_array,
                              starting_value=start,
                              out=cumulative[1:])

        max_return = np.fmax.accumulate(cumulative, axis=0)

        out = (cumulative - max_return) / max_return

        out = pd.Series(out[1:])

        return out
Example #7
    def test_cumulative(self):
        res_a = empyrical.cum_returns(ret['a']).rename('a')
        res_b = empyrical.cum_returns(ret['b']).rename('b')
        res_c = empyrical.cum_returns(ret['c']).rename('c')
        pd.testing.assert_series_equal(ret['a'].vbt.returns.cumulative(),
                                       res_a)
        pd.testing.assert_frame_equal(ret.vbt.returns.cumulative(),
                                      pd.concat([res_a, res_b, res_c], axis=1))
Example #8
def ptf_re_balance(return_dict, margin_prop=0.0, re_balance_freq=FreqType.EOM):
    """
    :param return_dict: dict, returnName: [returnData, ReturnType]
    :param margin_prop: float, optional, proportion of the init ptf that is allocated to futures account
    :param re_balance_freq: str, optional, rebalance frequency: daily/monthly/yearly
    :return: pd.Series, daily cumul returns of hedged ptf
    """
    strat_return = return_dict['stratReturn'][0]
    strat_return_type = return_dict['stratReturn'][1]
    benchmark_return = return_dict['benchmarkReturn'][0]
    benchmark_return_type = return_dict['benchmarkReturn'][1]

    strat_return = empyrical.cum_returns(strat_return,
                                         starting_value=1.0) if strat_return_type == ReturnType.NonCumul \
        else strat_return
    benchmark_return = empyrical.cum_returns(benchmark_return,
                                             starting_value=1.0) if benchmark_return_type == ReturnType.NonCumul \
        else benchmark_return

    pyFinAssert(0 <= margin_prop <= 1.0, ValueError,
                " margin prop must be between 0 and 1")
    hedged_ptf_return = pd.Series()
    # merge strat and index returns together
    return_data = pd.concat([strat_return, benchmark_return],
                            axis=1,
                            join_axes=[strat_return.index])
    return_data.columns = ['strategy', 'benchmark']
    return_data.index = pd.to_datetime(return_data.index)
    pyFinAssert(return_data.isnull().values.any() == False, ValueError,
                " returnData has NaN values")
    regroup_total_return = regroup_by_re_balance_freq(return_data,
                                                      re_balance_freq)

    # first date is a balance date
    re_balance_base_nav = 1.0
    norm_base_return = return_data.iloc[0]
    for name, group in regroup_total_return:
        # compute the hedged return
        norm_strat_return = group['strategy'] / norm_base_return['strategy']
        norm_benchmark_return = group['benchmark'] / norm_base_return[
            'benchmark']
        hedged_return = (1 + (norm_strat_return - norm_benchmark_return) *
                         (1 - margin_prop)) * re_balance_base_nav

        # update the re_balance base NPV
        re_balance_base_nav = hedged_return.iloc[-1]

        # update norm base return
        norm_base_return = group.iloc[-1]

        # merge into ptfValue
        hedged_ptf_return = pd.concat([hedged_ptf_return, hedged_return],
                                      axis=0)

    hedged_ptf_return.name = 'hedgedPtfReturn'
    hedged_ptf_return.index.name = return_data.index.name

    return hedged_ptf_return
Example #9
def plot_returns(perf_attrib_data, cost=None, ax=None):
    """
    Plot total, specific, and common returns.

    Parameters
    ----------
    perf_attrib_data : pd.DataFrame
        df with factors, common returns, and specific returns as columns,
        and datetimes as index. Assumes the `total_returns` column is NOT
        cost adjusted.
        - Example:
                        momentum  reversal  common_returns  specific_returns
            dt
            2017-01-01  0.249087  0.935925        1.185012          1.185012
            2017-01-02 -0.003194 -0.400786       -0.403980         -0.403980

    cost : pd.Series, optional
        if present, gets subtracted from `perf_attrib_data['total_returns']`,
        and gets plotted separately

    ax :  matplotlib.axes.Axes
        axes on which plots are made. if None, current axes will be used

    Returns
    -------
    ax :  matplotlib.axes.Axes
    """

    if ax is None:
        ax = plt.gca()

    returns = perf_attrib_data['total_returns']
    total_returns_label = 'Total returns'

    if cost is not None:
        returns = returns - cost
        total_returns_label += ' (adjusted)'

    specific_returns = perf_attrib_data['specific_returns']
    common_returns = perf_attrib_data['common_returns']

    ax.plot(ep.cum_returns(returns), color='b', label=total_returns_label)
    ax.plot(ep.cum_returns(specific_returns),
            color='g',
            label='Cumulative specific returns')
    ax.plot(ep.cum_returns(common_returns),
            color='r',
            label='Cumulative common returns')

    if cost is not None:
        ax.plot(cost, color='purple', label='Cost')

    ax.set_title('Time series of cumulative returns')
    ax.set_ylabel('Returns')

    configure_legend(ax)

    return ax
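A usage sketch with a toy attribution frame, assuming plot_returns above is in scope along with empyrical as ep and pyfolio's configure_legend helper:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

np.random.seed(5)
idx = pd.bdate_range('2017-01-02', periods=100)
common = pd.Series(np.random.normal(0.0, 0.005, len(idx)), index=idx)
specific = pd.Series(np.random.normal(0.0002, 0.005, len(idx)), index=idx)
perf_attrib_data = pd.DataFrame({'common_returns': common,
                                 'specific_returns': specific,
                                 'total_returns': common + specific})

plot_returns(perf_attrib_data, cost=pd.Series(0.0001, index=idx))
plt.show()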
Example #10
    def plot_returns(self):
        self.portfolio_total_returns = empyrical.cum_returns(
            self.analysis_data.chart_data.returns) * 100
        self.benchmark_total_returns = empyrical.cum_returns(
            self.analysis_data.chart_data.benchmark_returns) * 100

        self.returns_ax.plot(self.portfolio_total_returns)
        self.returns_ax.plot(self.benchmark_total_returns)

        self.returns_ax.legend(['Strategy', 'SPY'], loc='upper left')
        self.returns_ax.set_ylabel('Return')
Example #11
File: utils.py Project: sentrip/pycoin
def plot_returns(returns, factor_returns, transactions=None):
    fig, ax = create_trading_figure([0.1, 0.2, 0.8, 0.7], 'DateTime',
                                    'Return ( % )')
    ax.xaxis.set_major_formatter(mdates.DateFormatter(fmt='%d-%m-%Y %H:%M'))
    fig.autofmt_xdate()

    returns = cum_returns(returns, starting_value=1) * 100
    factor_returns = cum_returns(factor_returns, starting_value=1) * 100
    x = [i.to_pydatetime() for i in returns.index]
    ex_x = [x[0]] + x + [x[-1]]
    ex_returns = pd.Series([100]).append(returns.append(pd.Series([100])))
    ex_factor_returns = pd.Series([100]).append(
        factor_returns.append(pd.Series([100])))
    ax.fill(ex_x, ex_returns, color=(0, 1, 0, 0.3), zorder=0)
    ax.fill(ex_x, ex_factor_returns, color=(1, 0, 0, 0.3), zorder=0)
    ax.plot(x, returns, color='g', zorder=2)
    ax.plot(x, factor_returns, color='r', zorder=1)

    ax.plot(x, [100] * len(x), color='w', zorder=2, linewidth=3)

    if transactions is not None:
        buys_sells = transactions.iloc[transactions['amount'].nonzero()[0]]
        buys = buys_sells['amount'][buys_sells['amount'] > 0]
        sells = buys_sells['amount'][buys_sells['amount'] < 0]
        ax.scatter(x=[i.to_pydatetime() for i in buys.index],
                   y=[returns[i] for i in buys.index],
                   c=[(0, 1, 0, 0.5)] * len(buys),
                   s=100,
                   marker='v',
                   edgecolor=(0, 1, 0, 0.9),
                   zorder=2)
        ax.scatter(x=[i.to_pydatetime() for i in sells.index],
                   y=[returns[i] for i in sells.index],
                   c=[(1, 0, 0, 0.5)] * len(sells),
                   s=100,
                   marker='^',
                   edgecolor=(1, 0, 0, 0.9),
                   zorder=2)
        fig.suptitle('Strategy returns %s\n%s to %s' %
                     (transactions['symbol'].iloc[0],
                      returns.index[0].strftime('%d-%m-%Y'),
                      returns.index[-1].strftime('%d-%m-%Y')))
    else:
        fig.suptitle('Strategy returns\n%s to %s' %
                     (returns.index[0].strftime('%d-%m-%Y'),
                      returns.index[-1].strftime('%d-%m-%Y')))

    ax.set_xlim(x[0], x[-1])
    d1, d2 = max(max(returns), max(factor_returns), 100) - 100, 100 - min(
        min(returns), min(factor_returns), 100)
    ax.set_ylim(100 - d2 * 1.1, 100 + d1 * 1.1)
    plt.show()
Example #12
    def statsAndPlot(self):
        logger.info('Statistics and Plots')
        for account in self._accounts:
            logger.info('Account: %s', account.getName())
            logger.info(account.getRecorder().getRecords())

            logger.info('Official Report')
            logger.info(account.getOfficialRecorder().getRecords())
            rec = account.getOfficialRecorder().getRecords()
            from pprint import pprint
            pprint(recorder.Stats.getStats(rec.returns, rec.benchmark_returns))
            r_cum = empyrical.cum_returns(rec.returns)
            bc_cum = empyrical.cum_returns(rec.benchmark_returns)
            m = pd.DataFrame({'wealth': r_cum, 'benchmark': bc_cum})
            m2 = (1 + m) * 100
            # m2.plot(figsize=(15, 5), title = 'Wealth Curve')

            records = account.getRecorder().getRecords()
            ore = account.getOfficialRecorder().getRecords()
            lastLine = records.iloc[-1]
            lastLine.set_value('wealth', ore.iloc[-1].get_value('wealth'))
            lastLine.name += pd.Timedelta(days=1)
            records = records.append(lastLine)
            records['trades'] = records.order.apply(len)
            records['positionsNum'] = records.positions.apply(len)

            fig = plt.figure()
            ax1 = fig.add_subplot(2, 2, 1)
            ax2 = fig.add_subplot(2, 2, 2)
            ax3 = fig.add_subplot(2, 2, 3)

            m2.plot(figsize=(15, 5), title='Wealth Curve', ax=ax1)
            ax1.set_title('Wealth Curve')
            for label in ax1.get_xticklabels():
                label.set_rotation(20)
            ax2.plot(records['positionsNum'])
            ax2.set_title('Positions Number')
            for label in ax2.get_xticklabels():
                label.set_rotation(20)
            ts = records['trades'].groupby(pd.TimeGrouper(freq='1M')).sum()
            y_pos = np.arange(len(ts))
            ax3.bar(y_pos, ts)
            ax3.set_xticks(y_pos[::3])
            ax3.set_xticklabels(ts.index.strftime('%Y-%m')[::3], rotation=20)
            fmt = '%.0f%%'  # Format you want the ticks, e.g. '40%'
            yticks = mtick.FormatStrFormatter(fmt)
            ax3.yaxis.set_major_formatter(yticks)
            ax3.set_title('Turnover Rate')
            fig.show()
Example #13
def compute_consistency_score(returns_test, preds):
    """
    Compute Bayesian consistency score.

    Parameters
    ----------
    returns_test : pd.Series
        Observed daily returns, noncumulative (cumulated internally).
    preds : numpy.array
        Multiple simulated paths of daily returns, noncumulative.

    Returns
    -------
    Consistency score
        Score from 100 (returns_test perfectly on the median line of the
        Bayesian cone spanned by preds) to 0 (returns_test completely
        outside of Bayesian cone.)
    """

    returns_test_cum = cum_returns(returns_test, starting_value=1.)
    cum_preds = np.cumprod(preds + 1, 1)

    q = [sp.stats.percentileofscore(cum_preds[:, i],
                                    returns_test_cum.iloc[i],
                                    kind='weak')
         for i in range(len(returns_test_cum))]
    # normalize to be from 100 (perfect median line) to 0 (completely outside
    # of cone)
    return 100 - np.abs(50 - np.mean(q)) / .5
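A quick sanity check with simulated data, assuming the function above is in scope with its module imports (numpy as np, scipy as sp, and empyrical's cum_returns):

import numpy as np
import pandas as pd

np.random.seed(0)
returns_test = pd.Series(np.random.normal(0.0005, 0.01, 60))
preds = np.random.normal(0.0005, 0.01, size=(500, 60))

score = compute_consistency_score(returns_test, preds)
# close to 100 when the realized path hugs the cone's median,
# approaching 0 as it drifts entirely outside the cone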
Example #14
def get_max_drawdown(returns):
    """
    Determines the maximum drawdown of a strategy.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.cum_returns`.

    Returns
    -------
    float
        Maximum drawdown.

    Note
    -----
    See https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.
    """

    returns = returns.copy()
    df_cum = ep.cum_returns(returns, 1.0)
    running_max = np.maximum.accumulate(df_cum)
    underwater = df_cum / running_max - 1
    return get_max_drawdown_underwater(underwater)
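A small worked example; since get_max_drawdown depends on pyfolio's get_max_drawdown_underwater, the check below uses empyrical.max_drawdown, which computes the same quantity:

import pandas as pd
import empyrical as ep

rets = pd.Series([0.10, -0.50, 0.20])
# cumulative curve: 1.10, 0.55, 0.66 -> worst peak-to-trough is
# (0.55 - 1.10) / 1.10 = -0.50
print(ep.max_drawdown(rets))  # -0.5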
Example #15
    def performance_measures(pct_chg, y):
        result = {}
        y_init = list(map(reverse_func, y))
        predict = pd.Series(index=pct_chg.index, data=y_init)
        predict.name = 'label'
        df = pd.concat([pct_chg, predict.shift(1)], axis=1)
        df['return'] = 0
        short_cond = (df['label'] - mid_type) < -epsilon
        long_cond = (df['label'] - mid_type) > epsilon
        df.loc[long_cond, 'return'] = pct_chg.loc[long_cond]
        df.loc[short_cond, 'return'] = -pct_chg[short_cond]
        returns = df['return']

        if 'Y0' in performance_types:
            Y0 = pd.Series(index=pct_chg.index, data=list(y))
            result['Y0'] = Y0
        if 'Y' in performance_types:
            result['Y'] = predict
        if 'returns' in performance_types:
            result['returns'] = returns
        if 'cum_returns' in performance_types:
            result['cum_returns'] = empyrical.cum_returns(returns)
        if 'annual_return' in performance_types:
            result['annual_return'] = empyrical.annual_return(returns)
        if 'sharpe_ratio' in performance_types:
            result['sharpe_ratio'] = empyrical.sharpe_ratio(returns)
        return result
Example #16
def summarize_paths(samples, cone_std=(1.0, 1.5, 2.0), starting_value=1.0):
    """
    Generate the upper and lower bounds of an n standard deviation
    cone of forecasted cumulative returns.

    Parameters
    ----------
    samples : numpy.ndarray
        Alternative paths, or series of possible outcomes.
    cone_std : list of int/float
        Number of standard deviations to use in the boundaries of
        the cone. If multiple values are passed, cone bounds will
        be generated for each value.

    Returns
    -------
    cone_bounds : pd.DataFrame
    """

    cum_samples = ep.cum_returns(samples.T, starting_value=starting_value).T

    cum_mean = cum_samples.mean(axis=0)
    cum_std = cum_samples.std(axis=0)

    if isinstance(cone_std, (float, int)):
        cone_std = [cone_std]

    cone_bounds = pd.DataFrame(columns=pd.Float64Index([]))
    for num_std in cone_std:
        cone_bounds.loc[:, float(num_std)] = cum_mean + cum_std * num_std
        cone_bounds.loc[:, float(-num_std)] = cum_mean - cum_std * num_std

    return cone_bounds
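A usage sketch, assuming the function above is in scope with `import empyrical as ep` (note it targets older pandas, where pd.Float64Index still exists):

import numpy as np

np.random.seed(1)
samples = np.random.normal(0.0005, 0.01, size=(1000, 252))  # 1000 paths x 252 days

cone = summarize_paths(samples, cone_std=(1.0, 1.5, 2.0))
# columns 1.0/-1.0, 1.5/-1.5 and 2.0/-2.0 hold the upper/lower
# cone bounds for each day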
Example #17
def summarize_paths(samples, cone_std=(1., 1.5, 2.), starting_value=1.):
    """
    Generate the upper and lower bounds of an n standard deviation
    cone of forecasted cumulative returns.

    Parameters
    ----------
    samples : numpy.ndarray
        Alternative paths, or series of possible outcomes.
    cone_std : list of int/float
        Number of standard deviations to use in the boundaries of
        the cone. If multiple values are passed, cone bounds will
        be generated for each value.

    Returns
    -------
    cone_bounds : pd.DataFrame
    """

    cum_samples = empyrical.cum_returns(samples.T,
                                        starting_value=starting_value).T

    cum_mean = cum_samples.mean(axis=0)
    cum_std = cum_samples.std(axis=0)

    if isinstance(cone_std, (float, int)):
        cone_std = [cone_std]

    cone_bounds = pd.DataFrame(columns=pd.Float64Index([]))
    for num_std in cone_std:
        cone_bounds.loc[:, float(num_std)] = cum_mean + cum_std * num_std
        cone_bounds.loc[:, float(-num_std)] = cum_mean - cum_std * num_std

    return cone_bounds
Example #18
def gen_drawdown_table(returns, top=10):
    """
    Places top drawdowns in a table.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        The amount of top drawdowns to find (default 10).

    Returns
    -------
    df_drawdowns : pd.DataFrame
        Information about top drawdowns.
    """

    df_cum = ep.cum_returns(returns, 1.0)
    drawdown_periods = get_top_drawdowns(returns, top=top)
    df_drawdowns = pd.DataFrame(index=list(range(top)),
                                columns=[
                                    'Net drawdown in %', 'Peak date',
                                    'Valley date', 'Recovery date', 'Duration'
                                ])

    for i, (peak, valley, recovery) in enumerate(drawdown_periods):
        if pd.isnull(recovery):
            df_drawdowns.loc[i, 'Duration'] = np.nan
        else:
            df_drawdowns.loc[i, 'Duration'] = len(
                pd.date_range(peak, recovery, freq='B'))
        # to_pydatetime() appears to be a legacy API; use pd.to_datetime instead
        # df_drawdowns.loc[i, 'Peak date'] = (peak.to_pydatetime()
        #                                     .strftime('%Y-%m-%d'))
        # df_drawdowns.loc[i, 'Valley date'] = (valley.to_pydatetime()
        #                                       .strftime('%Y-%m-%d'))
        # if isinstance(recovery, float):
        #     df_drawdowns.loc[i, 'Recovery date'] = recovery
        # else:
        #     df_drawdowns.loc[i, 'Recovery date'] = (recovery.to_pydatetime()
        #                                             .strftime('%Y-%m-%d'))
        df_drawdowns.loc[i, 'Peak date'] = (
            pd.to_datetime(peak).strftime('%Y-%m-%d'))
        df_drawdowns.loc[i, 'Valley date'] = (
            pd.to_datetime(valley).strftime('%Y-%m-%d'))
        if isinstance(recovery, float):
            df_drawdowns.loc[i, 'Recovery date'] = recovery
        else:
            df_drawdowns.loc[i, 'Recovery date'] = pd.to_datetime(
                recovery).strftime('%Y-%m-%d')
        df_drawdowns.loc[i, 'Net drawdown in %'] = (
            (df_cum.loc[peak] - df_cum.loc[valley]) / df_cum.loc[peak]) * 100

    df_drawdowns['Peak date'] = pd.to_datetime(df_drawdowns['Peak date'])
    df_drawdowns['Valley date'] = pd.to_datetime(df_drawdowns['Valley date'])
    df_drawdowns['Recovery date'] = pd.to_datetime(
        df_drawdowns['Recovery date'])

    return df_drawdowns
Example #19
        def plot_drawdown_underwater(returns, ax=None, **kwargs):
            # Reference from https://github.com/quantopian/pyfolio/blob/master/pyfolio/plotting.py
            def percentage(x, pos):
                """
                Adds percentage sign to plot ticks.
                """
                return '%.0f%%' % x

            if ax is None:
                ax = plt.gca()

            y_axis_formatter = FuncFormatter(percentage)
            ax.yaxis.set_major_formatter(y_axis_formatter)

            df_cum_rets = ep.cum_returns(returns, starting_value=1.0)
            running_max = np.maximum.accumulate(df_cum_rets)
            underwater = -100 * ((running_max - df_cum_rets) / running_max)
            (underwater).plot(ax=ax,
                              kind='area',
                              color='coral',
                              alpha=0.7,
                              **kwargs)
            ax.set_ylabel('Drawdown')
            ax.set_title('Underwater plot')
            ax.set_xlabel('')
            return ax
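A usage sketch, assuming the nested function above is in scope together with numpy as np, empyrical as ep, and matplotlib's FuncFormatter:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

np.random.seed(3)
idx = pd.bdate_range('2016-01-04', periods=250)
rets = pd.Series(np.random.normal(0.0002, 0.01, len(idx)), index=idx)

plot_drawdown_underwater(rets)
plt.show()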
Example #20
    def get_sec_return_on_date(cls,
                               start_date,
                               end_date,
                               sec_ids,
                               freq=FreqType.EOD,
                               field=['close'],
                               return_type=DfReturnType.DateIndexAndSecIDCol,
                               is_cumul=False):
        """
        :param start_date: str, start date of the query period
        :param end_date: str, end date of the query period
        :param sec_ids: list of str, sec IDs
        :param field: str, field of data to be queried
        :param freq: FreqType
        :param return_type: DfReturnType
        :param is_cumul: bool, whether to return cumulative returns
        :return: pd.DataFrame, index = date, col = sec ID
        """
        if not w.isconnected():
            w.start()

        ret = WindMarketDataHandler.get_sec_price_on_date(
            start_date, end_date, sec_ids, freq, field, return_type)
        ret = ret.pct_change()
        if is_cumul:
            ret = ret.fillna(0)
            ret = cum_returns(ret, starting_value=1.0)
        else:
            ret = ret.dropna()
        return ret
Example #21
def ensure_cumul_return(func, argname, arg):
    if not isinstance(arg, RETURN):
        return arg
    if arg.type == ReturnType.Cumul:
        return arg.data
    else:
        data_ = cum_returns(arg.data, starting_value=1.0)
        return data_
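The (func, argname, arg) signature suggests this is an argument-coercion hook for a preprocess-style decorator. A direct-call sketch with hypothetical stand-ins for the project's RETURN and ReturnType types:

from collections import namedtuple
from enum import Enum

import pandas as pd
from empyrical import cum_returns

# hypothetical stand-ins, for illustration only
class ReturnType(Enum):
    Cumul = 1
    NonCumul = 2

RETURN = namedtuple('RETURN', ['data', 'type'])

daily = RETURN(data=pd.Series([0.01, -0.02, 0.005]), type=ReturnType.NonCumul)
nav = ensure_cumul_return(None, 'returns', daily)  # func and argname are unused here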
Example #22
    def test_cum_returns(self, returns, starting_value, expected):
        cum_returns = empyrical.cum_returns(returns,
                                            starting_value=starting_value)
        for i in range(returns.size):
            assert_almost_equal(
                cum_returns[i],
                expected[i],
                4)
Example #23
def cumulative_returns(daily_returns):
    """Cumulative Returns"""
    try:
        logger.info("Calculating Cumulative Returns...")
        cr = empyrical.cum_returns(daily_returns)
        return cr
    except Exception as exception:
        logger.error('Oops! An error Occurred ⚠️')
        raise exception
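A short usage example, assuming the module's logger is configured:

import pandas as pd

daily = pd.Series([0.01, 0.02, -0.01])
print(cumulative_returns(daily))
# 0.010000, 0.030200, 0.019898  (compounded, starting value 0)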
Example #24
def apply_slippage_penalty(
    returns,
    txn_daily,
    simulate_starting_capital,
    backtest_starting_capital,
    impact=0.1,
):
    """
    Applies a quadratic volume-share slippage model to daily returns based
    on the proportion of the observed historical daily bar dollar volume
    consumed by the strategy's trades. Scales the size of trades based
    on the ratio of the starting capital we wish to test to the starting
    capital of the passed backtest data.

    Parameters
    ----------
    returns : pd.Series
        Time series of daily returns.
    txn_daily : pd.Series
        Daily transaction totals, closing price, and daily volume for
        each traded name. See price_volume_daily_txns for more details.
    simulate_starting_capital : integer
        capital at which we want to test
    backtest_starting_capital : integer
        capital base at which the backtest was originally run
    impact : float
        Scales the size of the slippage penalty. See Zipline's
        volume-share slippage model.

    Returns
    -------
    adj_returns : pd.Series
        Slippage penalty adjusted daily returns.
    """

    mult = simulate_starting_capital / backtest_starting_capital
    simulate_traded_shares = abs(mult * txn_daily.amount)
    simulate_traded_dollars = txn_daily.price * simulate_traded_shares
    simulate_pct_volume_used = simulate_traded_shares / txn_daily.volume

    penalties = (simulate_pct_volume_used**2 * impact *
                 simulate_traded_dollars)

    daily_penalty = penalties.resample("D").sum()
    daily_penalty = daily_penalty.reindex(returns.index).fillna(0)

    # Since we are scaling the numerator of the penalties linearly
    # by capital base, it makes the most sense to scale the denominator
    # similarly. In other words, since we aren't applying compounding to
    # simulate_traded_shares, we shouldn't apply compounding to pv.
    portfolio_value = (
        ep.cum_returns(returns, starting_value=backtest_starting_capital) *
        mult)

    adj_returns = returns - (daily_penalty / portfolio_value)

    return adj_returns
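A usage sketch, assuming the function above is in scope with `import empyrical as ep`. Note that despite the docstring's pd.Series, txn_daily is accessed like a DataFrame with 'amount', 'price' and 'volume' columns:

import pandas as pd

idx = pd.bdate_range('2018-01-01', periods=3)
returns = pd.Series([0.010, -0.005, 0.002], index=idx)
txn_daily = pd.DataFrame({'amount': [1000, -500, 800],
                          'price': [10.00, 10.10, 10.05],
                          'volume': [1.0e6, 1.2e6, 0.9e6]}, index=idx)

# simulate a capital base 10x the original backtest's
adj = apply_slippage_penalty(returns, txn_daily, 10_000_000, 1_000_000)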
Example #25
def gen_drawdown_table(returns, top=10):
    """
    Places top drawdowns in a table.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        The amount of top drawdowns to find (default 10).

    Returns
    -------
    df_drawdowns : pd.DataFrame
        Information about top drawdowns.
    """

    df_cum = ep.cum_returns(returns, 1.0)
    drawdown_periods = get_top_drawdowns(returns, top=top)
    df_drawdowns = pd.DataFrame(index=list(range(top)),
                                columns=[
                                    'Net drawdown in %', 'Peak date',
                                    'Valley date', 'Recovery date', 'Duration'
                                ])

    # if there were fewer than the requested number of drawdowns
    # (which can happen if an early drawdown never recovers),
    # only return the available number of drawdowns (otherwise
    # the worst drawdowns plot can mess up the shared X axis)
    df_drawdowns = df_drawdowns.dropna(how="all")

    for i, (peak, valley, recovery) in enumerate(drawdown_periods):
        if pd.isnull(recovery):
            df_drawdowns.loc[i, 'Duration'] = np.nan
        else:
            df_drawdowns.loc[i, 'Duration'] = len(
                pd.date_range(peak, recovery, freq='B'))
        df_drawdowns.loc[i, 'Peak date'] = (
            peak.to_pydatetime().strftime('%Y-%m-%d'))
        df_drawdowns.loc[i, 'Valley date'] = (
            valley.to_pydatetime().strftime('%Y-%m-%d'))
        if isinstance(recovery, float):
            df_drawdowns.loc[i, 'Recovery date'] = recovery
        else:
            df_drawdowns.loc[i, 'Recovery date'] = (
                recovery.to_pydatetime().strftime('%Y-%m-%d'))
        df_drawdowns.loc[i, 'Net drawdown in %'] = (
            (df_cum.loc[peak] - df_cum.loc[valley]) / df_cum.loc[peak]) * 100

    df_drawdowns['Peak date'] = pd.to_datetime(df_drawdowns['Peak date'])
    df_drawdowns['Valley date'] = pd.to_datetime(df_drawdowns['Valley date'])
    df_drawdowns['Recovery date'] = pd.to_datetime(
        df_drawdowns['Recovery date'])

    return df_drawdowns
Example #26
def gen_drawdown_table(returns, top=10):
    """
    Places top drawdowns in a table.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        The amount of top drawdowns to find (default 10).

    Returns
    -------
    df_drawdowns : pd.DataFrame
        Information about top drawdowns.
    """

    df_cum = ep.cum_returns(returns, 1.0)
    drawdown_periods = get_top_drawdowns(returns, top=top)
    df_drawdowns = pd.DataFrame(
        index=list(range(top)),
        columns=[
            "Net drawdown in %",
            "Peak date",
            "Valley date",
            "Recovery date",
            "Duration",
        ],
    )

    for i, (peak, valley, recovery) in enumerate(drawdown_periods):
        if pd.isnull(recovery):
            df_drawdowns.loc[i, "Duration"] = np.nan
        else:
            df_drawdowns.loc[i, "Duration"] = len(
                pd.date_range(peak, recovery, freq="B"))
        df_drawdowns.loc[i, "Peak date"] = peak.to_pydatetime().strftime(
            "%Y-%m-%d")
        df_drawdowns.loc[i, "Valley date"] = valley.to_pydatetime().strftime(
            "%Y-%m-%d")
        if isinstance(recovery, float):
            df_drawdowns.loc[i, "Recovery date"] = recovery
        else:
            df_drawdowns.loc[i, "Recovery date"] = recovery.to_pydatetime(
            ).strftime("%Y-%m-%d")
        df_drawdowns.loc[i, "Net drawdown in %"] = (
            (df_cum.loc[peak] - df_cum.loc[valley]) / df_cum.loc[peak]) * 100

    df_drawdowns["Peak date"] = pd.to_datetime(df_drawdowns["Peak date"])
    df_drawdowns["Valley date"] = pd.to_datetime(df_drawdowns["Valley date"])
    df_drawdowns["Recovery date"] = pd.to_datetime(
        df_drawdowns["Recovery date"])

    return df_drawdowns
Example #27
def plot_returns(perf_attrib_data, ax=None):
    """
    Plot total, specific, and common returns.

    Parameters
    ----------
    perf_attrib_data : pd.DataFrame
        df with factors, common returns, and specific returns as columns,
        and datetimes as index
        - Example:
                        momentum  reversal  common_returns  specific_returns
            dt
            2017-01-01  0.249087  0.935925        1.185012          1.185012
            2017-01-02 -0.003194 -0.400786       -0.403980         -0.403980

    ax :  matplotlib.axes.Axes
        axes on which plots are made. if None, current axes will be used

    Returns
    -------
    ax :  matplotlib.axes.Axes
    """
    if ax is None:
        ax = plt.gca()

    returns = perf_attrib_data['total_returns']
    specific_returns = perf_attrib_data['specific_returns']
    common_returns = perf_attrib_data['common_returns']

    ax.plot(ep.cum_returns(returns), color='g', label='Total returns')
    ax.plot(ep.cum_returns(specific_returns), color='b',
            label='Cumulative specific returns')
    ax.plot(ep.cum_returns(common_returns), color='r',
            label='Cumulative common returns')

    ax.set_title('Time series of cumulative returns')
    ax.set_ylabel('Returns')

    set_legend_location(ax)

    return ax
Example #28
def gen_drawdown_table(returns, top=10):
    """
    Places top drawdowns in a table.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        The amount of top drawdowns to find (default 10).

    Returns
    -------
    df_drawdowns : pd.DataFrame
        Information about top drawdowns.
    """

    df_cum = empyrical.cum_returns(returns, 1.0)
    drawdown_periods = get_top_drawdowns(returns, top=top)
    df_drawdowns = pd.DataFrame(index=list(range(top)),
                                columns=['Net drawdown in %',
                                         'Peak date',
                                         'Valley date',
                                         'Recovery date',
                                         'Duration'])

    for i, (peak, valley, recovery) in enumerate(drawdown_periods):
        if pd.isnull(recovery):
            df_drawdowns.loc[i, 'Duration'] = np.nan
        else:
            df_drawdowns.loc[i, 'Duration'] = len(pd.date_range(peak,
                                                                recovery,
                                                                freq='B'))
        df_drawdowns.loc[i, 'Peak date'] = (peak.to_pydatetime()
                                            .strftime('%Y-%m-%d'))
        df_drawdowns.loc[i, 'Valley date'] = (valley.to_pydatetime()
                                              .strftime('%Y-%m-%d'))
        if isinstance(recovery, float):
            df_drawdowns.loc[i, 'Recovery date'] = recovery
        else:
            df_drawdowns.loc[i, 'Recovery date'] = (recovery.to_pydatetime()
                                                    .strftime('%Y-%m-%d'))
        df_drawdowns.loc[i, 'Net drawdown in %'] = (
            (df_cum.loc[peak] - df_cum.loc[valley]) / df_cum.loc[peak]) * 100

    df_drawdowns['Peak date'] = pd.to_datetime(df_drawdowns['Peak date'])
    df_drawdowns['Valley date'] = pd.to_datetime(df_drawdowns['Valley date'])
    df_drawdowns['Recovery date'] = pd.to_datetime(
        df_drawdowns['Recovery date'])

    return df_drawdowns
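A usage sketch, assuming gen_drawdown_table above is in scope together with the helpers it calls (get_top_drawdowns and get_max_drawdown_underwater, as in pyfolio.timeseries):

import numpy as np
import pandas as pd

np.random.seed(11)
idx = pd.bdate_range('2015-01-01', periods=500)
rets = pd.Series(np.random.normal(0.0003, 0.012, len(idx)), index=idx)

table = gen_drawdown_table(rets, top=5)
print(table[['Peak date', 'Valley date', 'Net drawdown in %']])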
Example #29
def plot_factor_contribution_to_perf(
    perf_attrib_data,
    ax=None,
    title="Cumulative common returns attribution",
):
    """
    Plot each factor's contribution to performance.

    Parameters
    ----------
    perf_attrib_data : pd.DataFrame
        df with factors, common returns, and specific returns as columns,
        and datetimes as index
        - Example:
                        momentum  reversal  common_returns  specific_returns
            dt
            2017-01-01  0.249087  0.935925        1.185012          1.185012
            2017-01-02 -0.003194 -0.400786       -0.403980         -0.403980

    ax :  matplotlib.axes.Axes
        axes on which plots are made. if None, current axes will be used

    title : str, optional
        title of plot

    Returns
    -------
    ax :  matplotlib.axes.Axes
    """
    if ax is None:
        ax = plt.gca()

    factors_to_plot = perf_attrib_data.drop(
        ["total_returns", "common_returns", "tilt_returns", "timing_returns"],
        axis="columns",
        errors="ignore",
    )

    factors_cumulative = pd.DataFrame()
    for factor in factors_to_plot:
        factors_cumulative[factor] = ep.cum_returns(factors_to_plot[factor])

    for col in factors_cumulative:
        ax.plot(factors_cumulative[col])

    ax.axhline(0, color="k")
    configure_legend(ax, change_colors=True)

    ax.set_ylabel("Cumulative returns by factor")
    ax.set_title(title)

    return ax
Example #30
def getAssetReturn(assets, start_date, end_date):
    chg_pct = getDailyIndexData(assets,
                                start_date,
                                end_date,
                                "CHG_PCT",
                                mode=1)
    cum_return = dict()
    for asset in assets:
        cum_return[asset] = empyrical.cum_returns(chg_pct.loc[:, asset],
                                                  starting_value=1.0)

    cum_return = pd.DataFrame(data=cum_return)
    return cum_return
Example #31
def plot_returns(returns, specific_returns, common_returns, ax=None):
    """
    Plot total, specific, and common returns.

    Parameters
    ----------
    returns : pd.Series
        total returns, indexed by datetime

    specific_returns : pd.Series
        specific returns, indexed by datetime

    common_returns : pd.Series
        common returns, indexed by datetime

    ax :  matplotlib.axes.Axes
        axes on which plots are made. if None, current axes will be used

    Returns
    -------
    ax :  matplotlib.axes.Axes
    """
    if ax is None:
        ax = plt.gca()

    ax.plot(ep.cum_returns(returns), color='g', label='Total returns')
    ax.plot(ep.cum_returns(specific_returns),
            color='b',
            label='Cumulative specific returns')
    ax.plot(ep.cum_returns(common_returns),
            color='r',
            label='Cumulative common returns')

    ax.set_title('Time series of cumulative returns')
    ax.set_ylabel('Returns')

    set_legend_location(ax)

    return ax
Example #32
File: capacity.py Project: jimgoo/pyfolio
def apply_slippage_penalty(returns, txn_daily, simulate_starting_capital,
                           backtest_starting_capital, impact=0.1):
    """
    Applies a quadratic volume-share slippage model to daily returns based
    on the proportion of the observed historical daily bar dollar volume
    consumed by the strategy's trades. Scales the size of trades based
    on the ratio of the starting capital we wish to test to the starting
    capital of the passed backtest data.

    Parameters
    ----------
    returns : pd.Series
        Time series of daily returns.
    txn_daily : pd.Series
        Daily transaction totals, closing price, and daily volume for
        each traded name. See price_volume_daily_txns for more details.
    simulate_starting_capital : integer
        capital at which we want to test
    backtest_starting_capital : integer
        capital base at which the backtest was originally run
    impact : float
        Scales the size of the slippage penalty. See Zipline's
        volume-share slippage model.

    Returns
    -------
    adj_returns : pd.Series
        Slippage penalty adjusted daily returns.
    """

    mult = simulate_starting_capital / backtest_starting_capital
    simulate_traded_shares = abs(mult * txn_daily.amount)
    simulate_traded_dollars = txn_daily.price * simulate_traded_shares
    simulate_pct_volume_used = simulate_traded_shares / txn_daily.volume

    penalties = simulate_pct_volume_used**2 \
        * impact * simulate_traded_dollars

    daily_penalty = penalties.resample('D').sum()
    daily_penalty = daily_penalty.reindex(returns.index).fillna(0)

    # Since we are scaling the numerator of the penalties linearly
    # by capital base, it makes the most sense to scale the denominator
    # similarly. In other words, since we aren't applying compounding to
    # simulate_traded_shares, we shouldn't apply compounding to pv.
    portfolio_value = empyrical.cum_returns(
        returns, starting_value=backtest_starting_capital) * mult

    adj_returns = returns - (daily_penalty / portfolio_value)

    return adj_returns
Example #33
File: timeseries.py Project: csvk/modpkgs
def get_top_drawdowns(returns, top=10):
    """
    Finds top drawdowns, sorted by drawdown amount.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        The amount of top drawdowns to find (default 10).

    Returns
    -------
    drawdowns : list
        List of drawdown peaks, valleys, and recoveries. See get_max_drawdown.
    """

    returns = returns.copy()
    df_cum = ep.cum_returns(returns, 1.0)
    running_max = np.maximum.accumulate(df_cum)
    underwater = df_cum / running_max - 1

    # SVK start
    #print('returns', returns)
    #print('uw', underwater)
    #import pickle as pkl
    #pkl.dump([returns, underwater], open('dump.obj', "wb" ))
    #print('count', top)

    # SVK end

    drawdowns = []
    for t in range(top):
        #print('iter', t) # SVK
        peak, valley, recovery = get_max_drawdown_underwater(underwater)
        #print(peak, valley, recovery) # SVK
        # Slice out draw-down period
        if not pd.isnull(recovery):
            underwater.drop(underwater[peak:recovery].index[1:-1],
                            inplace=True)
        else:
            # drawdown has not ended yet
            underwater = underwater.loc[:peak]

        drawdowns.append((peak, valley, recovery))
        if (len(returns) == 0) or (len(underwater) == 0):
            break

    return drawdowns
Example #34
def plot_factor_contribution_to_perf(
        perf_attrib_data,
        ax=None,
        title='Cumulative common returns attribution',
):
    """
    Plot each factor's contribution to performance.

    Parameters
    ----------
    perf_attrib_data : pd.DataFrame
        df with factors, common returns, and specific returns as columns,
        and datetimes as index
        - Example:
                        momentum  reversal  common_returns  specific_returns
            dt
            2017-01-01  0.249087  0.935925        1.185012          1.185012
            2017-01-02 -0.003194 -0.400786       -0.403980         -0.403980

    ax :  matplotlib.axes.Axes
        axes on which plots are made. if None, current axes will be used

    title : str, optional
        title of plot

    Returns
    -------
    ax :  matplotlib.axes.Axes
    """
    if ax is None:
        ax = plt.gca()

    factors_to_plot = perf_attrib_data.drop(
        ['total_returns', 'common_returns'], axis='columns', errors='ignore'
    )

    factors_cumulative = ep.cum_returns(factors_to_plot)

    for col in factors_cumulative:
        ax.plot(factors_cumulative[col])

    ax.axhline(0, color='k')
    configure_legend(ax, change_colors=True)

    ax.set_ylabel('Cumulative returns by factor')
    ax.set_title(title)

    return ax
Example #35
def get_top_drawdowns(returns, top=10):
    """
    Finds top drawdowns, sorted by drawdown amount.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        The amount of top drawdowns to find (default 10).

    Returns
    -------
    drawdowns : list
        List of drawdown peaks, valleys, and recoveries. See get_max_drawdown.
    """

    returns = returns.copy()
    df_cum = empyrical.cum_returns(returns, 1.0)
    running_max = np.maximum.accumulate(df_cum)
    underwater = df_cum / running_max - 1

    drawdowns = []
    for t in range(top):
        peak, valley, recovery = get_max_drawdown_underwater(underwater)
        # Slice out draw-down period
        if not pd.isnull(recovery):
            underwater.drop(underwater[peak: recovery].index[1:-1],
                            inplace=True)
        else:
            # drawdown has not ended yet
            underwater = underwater.loc[:peak]

        drawdowns.append((peak, valley, recovery))
        if (len(returns) == 0) or (len(underwater) == 0):
            break

    return drawdowns
Example #36
def cum_returns(returns, starting_value=0):
    """
    Compute cumulative returns from simple returns.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    starting_value : float, optional
       The starting value of the returns series (default 0).

    Returns
    -------
    pandas.Series
        Series of cumulative returns.

    Notes
    -----
    For increased numerical accuracy, convert input to log returns
    where it is possible to sum instead of multiplying.
    """

    return empyrical.cum_returns(returns, starting_value=starting_value)
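A worked example of the compounding, and of the log-returns identity mentioned in the Notes: since 1 + r_cum = prod(1 + r_t), the same value can be obtained by summing log returns, which is numerically gentler for long series:

import numpy as np
import pandas as pd
import empyrical

r = pd.Series([0.01, 0.02, -0.01])
print(empyrical.cum_returns(r).iloc[-1])  # (1.01 * 1.02 * 0.99) - 1 ~= 0.019898
print(np.expm1(np.log1p(r).sum()))        # same result via summed log returns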
Example #37
    def update(self, dt, algorithm_returns, benchmark_returns, leverage):
        # Keep track of latest dt for use in to_dict and other methods
        # that report current state.
        self.latest_dt = dt
        dt_loc = self.cont_index.get_loc(dt)
        self.latest_dt_loc = dt_loc

        self.algorithm_returns_cont[dt_loc] = algorithm_returns
        self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]

        self.num_trading_days = len(self.algorithm_returns)

        if self.create_first_day_stats:
            if len(self.algorithm_returns) == 1:
                self.algorithm_returns = np.append(0.0, self.algorithm_returns)

        self.algorithm_cumulative_returns[dt_loc] = cum_returns(
            self.algorithm_returns
        )[-1]

        algo_cumulative_returns_to_date = \
            self.algorithm_cumulative_returns[:dt_loc + 1]

        self.mean_returns_cont[dt_loc] = \
            algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days

        self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

        self.annualized_mean_returns_cont[dt_loc] = \
            self.mean_returns_cont[dt_loc] * 252

        self.annualized_mean_returns = \
            self.annualized_mean_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.mean_returns) == 1:
                self.mean_returns = np.append(0.0, self.mean_returns)
                self.annualized_mean_returns = np.append(
                    0.0, self.annualized_mean_returns)

        self.benchmark_returns_cont[dt_loc] = benchmark_returns
        self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.benchmark_returns) == 1:
                self.benchmark_returns = np.append(0.0, self.benchmark_returns)

        self.benchmark_cumulative_returns[dt_loc] = cum_returns(
            self.benchmark_returns
        )[-1]

        benchmark_cumulative_returns_to_date = \
            self.benchmark_cumulative_returns[:dt_loc + 1]

        self.mean_benchmark_returns_cont[dt_loc] = \
            benchmark_cumulative_returns_to_date[dt_loc] / \
            self.num_trading_days

        self.mean_benchmark_returns = \
            self.mean_benchmark_returns_cont[:dt_loc + 1]

        self.annualized_mean_benchmark_returns_cont[dt_loc] = \
            self.mean_benchmark_returns_cont[dt_loc] * 252

        self.annualized_mean_benchmark_returns = \
            self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

        self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
        self.algorithm_cumulative_leverages = \
            self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.algorithm_cumulative_leverages) == 1:
                self.algorithm_cumulative_leverages = np.append(
                    0.0,
                    self.algorithm_cumulative_leverages)

        # Raise if the two return streams have fallen out of sync.
        if len(self.algorithm_returns) != len(self.benchmark_returns):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
            message = message.format(
                bm_count=len(self.benchmark_returns),
                algo_count=len(self.algorithm_returns),
                start=self.start_session,
                end=self.end_session,
                dt=dt
            )
            raise Exception(message)

        self.update_current_max()
        self.benchmark_volatility[dt_loc] = annual_volatility(
            self.benchmark_returns
        )
        self.algorithm_volatility[dt_loc] = annual_volatility(
            self.algorithm_returns
        )

        # caching the treasury rates for the minutely case is a
        # big speedup, because it avoids searching the treasury
        # curves on every minute.
        # In both minutely and daily, the daily curve is always used.
        treasury_end = dt.replace(hour=0, minute=0)
        if np.isnan(self.daily_treasury[treasury_end]):
            treasury_period_return = choose_treasury(
                self.treasury_curves,
                self.start_session,
                treasury_end,
                self.trading_calendar,
            )
            self.daily_treasury[treasury_end] = treasury_period_return
        self.treasury_period_return = self.daily_treasury[treasury_end]
        self.excess_returns[dt_loc] = (
            self.algorithm_cumulative_returns[dt_loc] -
            self.treasury_period_return)

        self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
            self.algorithm_returns,
            self.benchmark_returns,
        )
        self.sharpe[dt_loc] = sharpe_ratio(
            self.algorithm_returns,
        )
        self.downside_risk[dt_loc] = downside_risk(
            self.algorithm_returns
        )
        self.sortino[dt_loc] = sortino_ratio(
            self.algorithm_returns,
            _downside_risk=self.downside_risk[dt_loc]
        )
        self.information[dt_loc] = information_ratio(
            self.algorithm_returns,
            self.benchmark_returns,
        )
        self.max_drawdown = max_drawdown(
            self.algorithm_returns
        )
        self.max_drawdowns[dt_loc] = self.max_drawdown
        self.max_leverage = self.calculate_max_leverage()
        self.max_leverages[dt_loc] = self.max_leverage
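
The running-mean bookkeeping in update boils down to two lines of arithmetic: divide the cumulative return to date by the number of trading days so far, then multiply by 252 to annualize. A standalone sketch of just that calculation (the input array is illustrative):

import numpy as np

daily_returns = np.array([0.01, -0.005, 0.002, 0.0, 0.003])

# Cumulative return through each session, like cum_returns(...)[-1].
cumulative = np.cumprod(1.0 + daily_returns) - 1.0

# mean_returns_cont[dt_loc] = cumulative return to date / num_trading_days
mean_returns = cumulative / np.arange(1, len(daily_returns) + 1)

# annualized_mean_returns_cont[dt_loc] = mean_returns_cont[dt_loc] * 252
annualized_mean_returns = mean_returns * 252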
Example #43
File: period.py  Project: FranSal/zipline
    def calculate_metrics(self):
        self.benchmark_period_returns = \
            cum_returns(self.benchmark_returns).iloc[-1]

        self.algorithm_period_returns = \
            cum_returns(self.algorithm_returns).iloc[-1]

        if not self.algorithm_returns.index.equals(
            self.benchmark_returns.index
        ):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
            algorithm_returns ({algo_count}) in range {start} : {end}"
            message = message.format(
                bm_count=len(self.benchmark_returns),
                algo_count=len(self.algorithm_returns),
                start=self._start_session,
                end=self._end_session
            )
            raise Exception(message)

        self.num_trading_days = len(self.benchmark_returns)

        self.mean_algorithm_returns = (
            self.algorithm_returns.cumsum() /
            np.arange(1, self.num_trading_days + 1, dtype=np.float64)
        )

        self.benchmark_volatility = annual_volatility(self.benchmark_returns)
        self.algorithm_volatility = annual_volatility(self.algorithm_returns)

        self.treasury_period_return = choose_treasury(
            self.treasury_curves,
            self._start_session,
            self._end_session,
            self.trading_calendar,
        )
        self.sharpe = sharpe_ratio(
            self.algorithm_returns,
        )
        # The consumer currently expects a 0.0 value for sharpe in period,
        # this differs from cumulative which was np.nan.
        # When factoring out the sharpe_ratio, the different return types
        # were collapsed into `np.nan`.
        # TODO: Either fix consumer to accept `np.nan` or make the
        # `sharpe_ratio` return type configurable.
        # In the meantime, convert nan values to 0.0
        if pd.isnull(self.sharpe):
            self.sharpe = 0.0
        self.downside_risk = downside_risk(
            self.algorithm_returns.values
        )
        self.sortino = sortino_ratio(
            self.algorithm_returns.values,
            _downside_risk=self.downside_risk,
        )
        self.information = information_ratio(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.alpha, self.beta = alpha_beta_aligned(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.excess_return = self.algorithm_period_returns - \
            self.treasury_period_return
        self.max_drawdown = max_drawdown(self.algorithm_returns.values)
        self.max_leverage = self.calculate_max_leverage()
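
The NaN-to-0.0 Sharpe workaround above is easy to reproduce in isolation: a zero-volatility return series makes sharpe_ratio divide by zero. A minimal sketch (synthetic input, assuming empyrical is importable as in the other examples):

import pandas as pd
import empyrical as ep

# Constant returns have zero volatility, so sharpe_ratio yields NaN.
flat_returns = pd.Series([0.0] * 5)
sharpe = ep.sharpe_ratio(flat_returns)

# The consumer-facing workaround from calculate_metrics:
if pd.isnull(sharpe):
    sharpe = 0.0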
Example #44
    def risk_metric_period(cls,
                           start_session,
                           end_session,
                           algorithm_returns,
                           benchmark_returns,
                           algorithm_leverages):
        """
        Creates a dictionary representing the state of the risk report.

        Parameters
        ----------
        start_session : pd.Timestamp
            Start of period (inclusive) to produce metrics on
        end_session : pd.Timestamp
            End of period (inclusive) to produce metrics on
        algorithm_returns : pd.Series(pd.Timestamp -> float)
            Series of algorithm returns as of the end of each session
        benchmark_returns : pd.Series(pd.Timestamp -> float)
            Series of benchmark returns as of the end of each session
        algorithm_leverages : pd.Series(pd.Timestamp -> float)
            Series of algorithm leverages as of the end of each session

        Returns
        -------
        risk_metric : dict[str, any]
            Dict of metrics with fields like:
                {
                    'algorithm_period_return': 0.0,
                    'benchmark_period_return': 0.0,
                    'treasury_period_return': 0,
                    'excess_return': 0.0,
                    'alpha': 0.0,
                    'beta': 0.0,
                    'sharpe': 0.0,
                    'sortino': 0.0,
                    'period_label': '1970-01',
                    'trading_days': 0,
                    'algo_volatility': 0.0,
                    'benchmark_volatility': 0.0,
                    'max_drawdown': 0.0,
                    'max_leverage': 0.0,
                }
        """

        algorithm_returns = algorithm_returns[
            (algorithm_returns.index >= start_session) &
            (algorithm_returns.index <= end_session)
        ]

        # Benchmark needs to be masked to the same dates as the algo returns
        benchmark_returns = benchmark_returns[
            (benchmark_returns.index >= start_session) &
            (benchmark_returns.index <= algorithm_returns.index[-1])
        ]

        benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
        algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]

        alpha, beta = ep.alpha_beta_aligned(
            algorithm_returns.values,
            benchmark_returns.values,
        )

        sharpe = ep.sharpe_ratio(algorithm_returns)

        # The consumer currently expects a 0.0 value for sharpe in period,
        # this differs from cumulative which was np.nan.
        # When factoring out the sharpe_ratio, the different return types
        # were collapsed into `np.nan`.
        # TODO: Either fix consumer to accept `np.nan` or make the
        # `sharpe_ratio` return type configurable.
        # In the meantime, convert nan values to 0.0
        if pd.isnull(sharpe):
            sharpe = 0.0

        sortino = ep.sortino_ratio(
            algorithm_returns.values,
            _downside_risk=ep.downside_risk(algorithm_returns.values),
        )

        rval = {
            'algorithm_period_return': algorithm_period_returns,
            'benchmark_period_return': benchmark_period_returns,
            'treasury_period_return': 0,
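            # Treasury return is hard-coded to 0 above, so the excess
            # return reduces to the algorithm's period return.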
            'excess_return': algorithm_period_returns,
            'alpha': alpha,
            'beta': beta,
            'sharpe': sharpe,
            'sortino': sortino,
            'period_label': end_session.strftime("%Y-%m"),
            'trading_days': len(benchmark_returns),
            'algo_volatility': ep.annual_volatility(algorithm_returns),
            'benchmark_volatility': ep.annual_volatility(benchmark_returns),
            'max_drawdown': ep.max_drawdown(algorithm_returns.values),
            'max_leverage': algorithm_leverages.max(),
        }

        # check if a field in rval is nan or inf, and replace it with None
        # except period_label which is always a str
        return {
            k: (
                None
                if k != 'period_label' and not np.isfinite(v) else
                v
            )
            for k, v in iteritems(rval)  # iteritems: from six (py2/3 compat)
        }
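
The closing comprehension is a general NaN/inf-to-None sanitizer that leaves the string-valued period_label alone. The same pattern in isolation (the sample dict is made up):

import numpy as np

rval = {
    'alpha': float('nan'),
    'beta': 1.2,
    'max_drawdown': float('-inf'),
    'period_label': '1970-01',
}

clean = {
    k: None if k != 'period_label' and not np.isfinite(v) else v
    for k, v in rval.items()
}
# clean == {'alpha': None, 'beta': 1.2, 'max_drawdown': None,
#           'period_label': '1970-01'}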