Code example #1
 def test_downside_risk_noisy(self, noise, flat_line):
     noisy_returns_1 = noise[0:250].add(flat_line[250:], fill_value=0)
     noisy_returns_2 = noise[0:500].add(flat_line[500:], fill_value=0)
     noisy_returns_3 = noise[0:750].add(flat_line[750:], fill_value=0)
     dr_1 = empyrical.downside_risk(noisy_returns_1, flat_line)
     dr_2 = empyrical.downside_risk(noisy_returns_2, flat_line)
     dr_3 = empyrical.downside_risk(noisy_returns_3, flat_line)
     assert dr_1 <= dr_2
     assert dr_2 <= dr_3
Code example #2
 def test_downside_risk_std(self, smaller_std, larger_std):
     less_noise = pd.Series(
         [random.gauss(0, smaller_std) for i in range(1000)],
         index=pd.date_range('2000-1-30', periods=1000, freq='D'))
     more_noise = pd.Series(
         [random.gauss(0, larger_std) for i in range(1000)],
         index=pd.date_range('2000-1-30', periods=1000, freq='D'))
     assert empyrical.downside_risk(less_noise) < \
         empyrical.downside_risk(more_noise)
Code example #3
File: test_returns.py Project: zhgu-dev/vectorbt
 def test_downside_risk(self, test_required_return):
     res_a = empyrical.downside_risk(ret['a'], required_return=test_required_return)
     res_b = empyrical.downside_risk(ret['b'], required_return=test_required_return)
     res_c = empyrical.downside_risk(ret['c'], required_return=test_required_return)
     assert isclose(ret['a'].vbt.returns.downside_risk(required_return=test_required_return), res_a)
     pd.testing.assert_series_equal(
         ret.vbt.returns.downside_risk(required_return=test_required_return),
         pd.Series([res_a, res_b, res_c], index=ret.columns).rename('downside_risk')
     )
Code example #4
File: metric_store.py Project: williamsyb/Sniper
 def get_annual_downside_risk(self, data):  # 2.15%
     data = copy.deepcopy(data)
     rh = self.rft_ret / self.q
     downside_risk = empyrical.downside_risk(data.rets.dropna(),
                                             required_return=rh,
                                             period='weekly')
     return downside_risk
Code example #5
File: timeseries.py Project: jimgoo/pyfolio
def downside_risk(returns, required_return=0, period=DAILY):
    """
    Determines the downside deviation below a threshold

    Parameters
    ----------
    returns : pd.Series or pd.DataFrame
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    required_return: float / series
        minimum acceptable return
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Can be 'monthly', 'weekly', or 'daily'.
        - Defaults to 'daily'.

    Returns
    -------
    depends on input type
    series ==> float
    DataFrame ==> np.array

        Annualized downside deviation
    """

    return empyrical.downside_risk(returns,
                                   required_return=required_return,
                                   period=period)
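
For illustration, here is a minimal usage sketch of the empyrical call that this wrapper forwards to (an assumption-laden example: it presumes empyrical and pandas are installed, and the return values below are made up):

import empyrical
import pandas as pd

# Hypothetical noncumulative daily returns (illustration data only)
idx = pd.date_range('2020-01-01', periods=5, freq='D')
returns = pd.Series([0.01, -0.02, 0.003, -0.004, 0.012], index=idx)
frame = pd.DataFrame({'a': returns, 'b': returns * 0.5})

# A Series input yields a float, a DataFrame input yields one value per column,
# matching the docstring above
print(empyrical.downside_risk(returns, required_return=0, period='daily'))
print(empyrical.downside_risk(frame, required_return=0, period='daily'))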
Code example #6
    def calculate_metrics(self):
        self.benchmark_period_returns = \
            cum_returns(self.benchmark_returns).iloc[-1]

        self.algorithm_period_returns = \
            cum_returns(self.algorithm_returns).iloc[-1]

        if not self.algorithm_returns.index.equals(
                self.benchmark_returns.index):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
            algorithm_returns ({algo_count}) in range {start} : {end}"

            message = message.format(bm_count=len(self.benchmark_returns),
                                     algo_count=len(self.algorithm_returns),
                                     start=self._start_session,
                                     end=self._end_session)
            raise Exception(message)

        self.num_trading_days = len(self.benchmark_returns)

        self.mean_algorithm_returns = (
            self.algorithm_returns.cumsum() /
            np.arange(1, self.num_trading_days + 1, dtype=np.float64))

        self.benchmark_volatility = annual_volatility(self.benchmark_returns)
        self.algorithm_volatility = annual_volatility(self.algorithm_returns)

        self.treasury_period_return = choose_treasury(
            self.treasury_curves,
            self._start_session,
            self._end_session,
            self.trading_calendar,
        )
        self.sharpe = sharpe_ratio(self.algorithm_returns, )
        # The consumer currently expects a 0.0 value for sharpe in period,
        # this differs from cumulative which was np.nan.
        # When factoring out the sharpe_ratio, the different return types
        # were collapsed into `np.nan`.
        # TODO: Either fix consumer to accept `np.nan` or make the
        # `sharpe_ratio` return type configurable.
        # In the meantime, convert nan values to 0.0
        if pd.isnull(self.sharpe):
            self.sharpe = 0.0
        self.downside_risk = downside_risk(self.algorithm_returns.values)
        self.sortino = sortino_ratio(
            self.algorithm_returns.values,
            _downside_risk=self.downside_risk,
        )
        self.information = information_ratio(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.alpha, self.beta = alpha_beta_aligned(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.excess_return = self.algorithm_period_returns - \
            self.treasury_period_return
        self.max_drawdown = max_drawdown(self.algorithm_returns.values)
        self.max_leverage = self.calculate_max_leverage()
Code example #7
    def evaluation(self):
        ap.sound(f'entry: create_df')

        mdd = empyrical.max_drawdown(self.df.eac_stgy_rt)
        stgy_ret_an = empyrical.annual_return(self.df.eac_stgy_rt, annualization=self.cls.annualization)
        bcmk_ret_an = empyrical.annual_return(self.df.eac_bcmk_rt, annualization=self.cls.annualization)
        stgy_vlt_an = empyrical.annual_volatility(self.df.eac_stgy_rt, annualization=self.cls.annualization)
        bcmk_vlt_an = empyrical.annual_volatility(self.df.eac_bcmk_rt, annualization=self.cls.annualization)
        calmar = empyrical.calmar_ratio(self.df.eac_stgy_rt, annualization=self.cls.annualization)
        omega = empyrical.omega_ratio(self.df.eac_stgy_rt, risk_free=self.cls.rf, annualization=self.cls.annualization)
        sharpe = qp.sharpe_ratio(stgy_ret_an, self.df.cum_stgy_rt, self.cls.rf)
        sortino = empyrical.sortino_ratio(self.df.eac_stgy_rt, annualization=self.cls.annualization)
        dsrk = empyrical.downside_risk(self.df.eac_stgy_rt, annualization=self.cls.annualization)
        information = empyrical.information_ratio(self.df.eac_stgy_rt, factor_returns=self.df.eac_bcmk_rt)
        beta = empyrical.beta(self.df.eac_stgy_rt, factor_returns=self.df.eac_bcmk_rt, risk_free=self.cls.rf)
        tail_rt = empyrical.tail_ratio(self.df.eac_stgy_rt)
        alpha = qp.alpha_ratio(stgy_ret_an, bcmk_ret_an, self.cls.rf, beta)

        stgy_ttrt_rt = (self.cls.yd.ttas[-1] - self.cls.yd.ttas[0]) / self.cls.yd.ttas[0]
        bcmk_ttrt_rt = (self.cls.pc.close[-1] - self.cls.pc.close[0]) / self.cls.pc.close[0]
        car_rt = stgy_ttrt_rt - bcmk_ttrt_rt
        car_rt_an = stgy_ret_an - bcmk_ret_an

        self.cls.df_output = pd.DataFrame(
            {'stgy_ttrt_rt': [stgy_ttrt_rt], 'bcmk_ttrt_rt': [bcmk_ttrt_rt], 'car_rt': [car_rt],
             'stgy_ret_an': [stgy_ret_an], 'bcmk_ret_an': [bcmk_ret_an], 'car_rt_an': [car_rt_an],
             'stgy_vlt_an': [stgy_vlt_an], 'bcmk_vlt_an': [bcmk_vlt_an], 'mdd': [mdd],
             'sharpe': [sharpe], 'alpha': [alpha], 'beta': [beta], 'information': [information],
             'tail_rt': [tail_rt], 'calmar': [calmar], 'omega': [omega], 'sortino': [sortino], 'dsrk': [dsrk]})
        print(f'feedback: \n{self.cls.df_output.T}')
Code example #8
def downside_risk(returns, required_return=0, period=DAILY):
    """
    Determines the downside deviation below a threshold

    Parameters
    ----------
    returns : pd.Series or pd.DataFrame
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    required_return: float / series
        minimum acceptable return
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Can be 'monthly', 'weekly', or 'daily'.
        - Defaults to 'daily'.

    Returns
    -------
    depends on input type
    series ==> float
    DataFrame ==> np.array

        Annualized downside deviation
    """

    return ep.downside_risk(returns,
                            required_return=required_return,
                            period=period)
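
Since the docstring above describes the result as an annualized downside deviation, a hand-rolled sketch of that quantity may help make the API concrete. This is only an approximation under the common definition (root mean square of below-threshold excess returns, scaled by the square root of the annualization factor), not necessarily byte-for-byte what empyrical computes:

import numpy as np

def downside_risk_sketch(returns, required_return=0.0, ann_factor=252):
    # Keep only the part of each excess return that falls below the threshold
    downside = np.clip(np.asarray(returns, dtype=float) - required_return, None, 0.0)
    # Root mean square of the downside, annualized by sqrt(ann_factor)
    return np.sqrt(np.nanmean(np.square(downside))) * np.sqrt(ann_factor)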
Code example #9
 def test_downside_risk(self, returns, required_return, period, expected):
     downside_risk = empyrical.downside_risk(
         returns, required_return=required_return, period=period)
     if isinstance(downside_risk, float):
         assert_almost_equal(downside_risk, expected, DECIMAL_PLACES)
     else:
         for i in range(downside_risk.size):
             assert_almost_equal(downside_risk[i], expected[i],
                                 DECIMAL_PLACES)
Code example #10
    def plot(self):
        # show a plot of portfolio vs mean market performance
        df_info = pd.DataFrame(self.infos)
        df_info.set_index('current step', inplace=True)
        #   df_info.set_index('date', inplace=True)
        rn = np.asarray(df_info['portfolio return'])

        try:
            spf = df_info['portfolio value'].iloc[1]  #   Start portfolio value
            epf = df_info['portfolio value'].iloc[-1]  #   End portfolio value
            pr = (epf - spf) / spf
        except:
            pr = 0

        try:
            sr = sharpe_ratio(rn)
        except:
            sr = 0

        try:
            sor = sortino_ratio(rn)
        except:
            sor = 0

        try:
            mdd = max_drawdown(rn)
        except:
            mdd = 0

        try:
            cr = calmar_ratio(rn)
        except:
            cr = 0

        try:
            om = omega_ratio(rn)
        except:
            om = 0

        try:
            dr = downside_risk(rn)
        except:
            dr = 0

        print("First portfolio value: ",
              np.round(df_info['portfolio value'].iloc[1]))
        print("Last portfolio value: ",
              np.round(df_info['portfolio value'].iloc[-1]))

        title = self.strategy_name + ': ' + 'profit={: 2.2%} sharpe={: 2.2f} sortino={: 2.2f} max drawdown={: 2.2%} calmar={: 2.2f} omega={: 2.2f} downside risk={: 2.2f}'.format(
            pr, sr, sor, mdd, cr, om, dr)
        #   df_info[['market value', 'portfolio value']].plot(title=title, fig=plt.gcf(), figsize=(15,10), rot=30)
        df_info[['portfolio value']].plot(title=title,
                                          fig=plt.gcf(),
                                          figsize=(15, 10),
                                          rot=30)
Code example #11
    def _get_reward(self, current_prices, next_prices):
        if self.compute_reward == compute_reward.profit:
            returns_rate = next_prices / current_prices
            #   pip_value = self._calculate_pip_value_in_account_currency(account_currency.USD, next_prices)
            #   returns_rate = np.multiply(returns_rate, pip_value)
            log_returns = np.log(returns_rate)
            last_weight = self.current_weights
            securities_value = self.current_portfolio_values[:-1] * returns_rate
            self.current_portfolio_values[:-1] = securities_value
            self.current_weights = self.current_portfolio_values / np.sum(
                self.current_portfolio_values)
            reward = last_weight[:-1] * log_returns
        elif self.compute_reward == compute_reward.sharpe:
            try:
                sr = sharpe_ratio(np.asarray(self.returns))
            except:
                sr = 0
            reward = sr
        elif self.compute_reward == compute_reward.sortino:
            try:
                sr = sortino_ratio(np.asarray(self.returns))
            except:
                sr = 0
            reward = sr
        elif self.compute_reward == compute_reward.max_drawdown:
            try:
                mdd = max_drawdown(np.asarray(self.returns))
            except:
                mdd = 0
            reward = mdd
        elif self.compute_reward == compute_reward.calmar:
            try:
                cr = calmar_ratio(np.asarray(self.returns))
            except:
                cr = 0
            reward = cr
        elif self.compute_reward == compute_reward.omega:
            try:
                om = omega_ratio(np.asarray(self.returns))
            except:
                om = 0
            reward = om
        elif self.compute_reward == compute_reward.downside_risk:
            try:
                dr = downside_risk(np.asarray(self.returns))
            except:
                dr = 0
            reward = dr

        try:
            reward = reward.mean()
        except:
            reward = reward

        return reward
Code example #12
def downside_risk(close, required_return=0, period='daily',
                  annualization=None):
    try:
        rets = daily_returns(close)
        dr_data = empyrical.downside_risk(rets,
                                          required_return=required_return,
                                          period=period,
                                          annualization=annualization)
        return dr_data
    except Exception as e:
        raise (e)
Code example #13
def downside_risk(daily_returns,
                  required_return=0,
                  period='daily',
                  annualization=None):
    """Downside Risk"""
    try:
        logger.info('Calculating Downside Risk...')
        dr_data = empyrical.downside_risk(daily_returns,
                                          required_return=required_return,
                                          period=period,
                                          annualization=annualization)
        return dr_data
    except Exception as exception:
        logger.error('Oops! An Error Occurred ⚠️')
        raise exception
Code example #14
    def getStats(cls, returns, benchmark_returns):
        _alpha, _beta = alpha_beta_aligned(
            returns,
            benchmark_returns,
        )

        _sharpe = sharpe_ratio(
            returns
        )

        _downside_risk = downside_risk(
            returns
        )

        _max_drawdown = max_drawdown(
            returns
        )

        _annual_volatility = annual_volatility(
            returns
        )

        _benchmark_volatility = annual_volatility(
            benchmark_returns
        )

        _annual_return = annual_return(
            returns
        )

        _cum_return = cum_returns(
            returns
        )

        return {
            'cum_return' : _cum_return,
            'annual_return' : _annual_return,
            'annual_volatility' : _annual_volatility,
            'benchmark_volatility' : _benchmark_volatility,
            'max_drawdown' : _max_drawdown,
            'downside_risk' : _downside_risk,
            'sharpe ratio' : _sharpe,
            'alpha' : _alpha,
            'beta' : _beta,
        }
Code example #15
def getCombinedIndex(assets, start_date, end_date, factor_weight, **kwargs):
    daily_return_data = getDailyIndexData(assets, start_date, end_date, "CHG_PCT")
    monthly_index_data = getMonthlyIndexData(assets, start_date, end_date)
    monthly_return_data = getMonthlyReturnData(monthly_index_data)

    # Calculate the momentum factor
    momentum = getMomentum(monthly_return_data)
    # Calculate the volatility factor
    #volatility = getAnnualizedVolatility(daily_return_data)
    volatility = pd.DataFrame(empyrical.downside_risk(daily_return_data, required_return=0, period='daily'))

    # Calculate the correlation factor
    corr_factor = getCorrelationFactor(daily_return_data)

    # Rank the assets on the three factors
    momentum_rank = getRank(momentum, False)
    volatility_rank = getRank(volatility, True)
    corr_factor_rank = getRank(corr_factor, True)

    combined_index = factor_weight["momentum"] * momentum_rank + factor_weight["volatility"] * volatility_rank + factor_weight["corr"] * corr_factor_rank

    # Filter out assets with negative momentum
    positive_momentum_asset = []
    for asset in momentum.index:
        if momentum.loc[asset, 0] > 0:
            positive_momentum_asset.append(asset)

    if 'bond' in kwargs.keys():
        bond_assets = kwargs['bond']
        for bond_asset in bond_assets:
            if bond_asset not in positive_momentum_asset:
                positive_momentum_asset.append(bond_asset)

    combined_index = combined_index.loc[positive_momentum_asset]

    return combined_index
Code example #16
File: metric.py Project: hrocha/zipline-reloaded
    def risk_metric_period(
        cls,
        start_session,
        end_session,
        algorithm_returns,
        benchmark_returns,
        algorithm_leverages,
    ):
        """
        Creates a dictionary representing the state of the risk report.

        Parameters
        ----------
        start_session : pd.Timestamp
            Start of period (inclusive) to produce metrics on
        end_session : pd.Timestamp
            End of period (inclusive) to produce metrics on
        algorithm_returns : pd.Series(pd.Timestamp -> float)
            Series of algorithm returns as of the end of each session
        benchmark_returns : pd.Series(pd.Timestamp -> float)
            Series of benchmark returns as of the end of each session
        algorithm_leverages : pd.Series(pd.Timestamp -> float)
            Series of algorithm leverages as of the end of each session


        Returns
        -------
        risk_metric : dict[str, any]
            Dict of metrics with fields like:
                {
                    'algorithm_period_return': 0.0,
                    'benchmark_period_return': 0.0,
                    'treasury_period_return': 0,
                    'excess_return': 0.0,
                    'alpha': 0.0,
                    'beta': 0.0,
                    'sharpe': 0.0,
                    'sortino': 0.0,
                    'period_label': '1970-01',
                    'trading_days': 0,
                    'algo_volatility': 0.0,
                    'benchmark_volatility': 0.0,
                    'max_drawdown': 0.0,
                    'max_leverage': 0.0,
                }
        """

        algorithm_returns = algorithm_returns[
            (algorithm_returns.index >= start_session)
            & (algorithm_returns.index <= end_session)]

        # Benchmark needs to be masked to the same dates as the algo returns
        benchmark_returns = benchmark_returns[
            (benchmark_returns.index >= start_session)
            & (benchmark_returns.index <= algorithm_returns.index[-1])]

        benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
        algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]

        alpha, beta = ep.alpha_beta_aligned(
            algorithm_returns.values,
            benchmark_returns.values,
        )
        benchmark_volatility = ep.annual_volatility(benchmark_returns)

        sharpe = ep.sharpe_ratio(algorithm_returns)

        # The consumer currently expects a 0.0 value for sharpe in period,
        # this differs from cumulative which was np.nan.
        # When factoring out the sharpe_ratio, the different return types
        # were collapsed into `np.nan`.
        # TODO: Either fix consumer to accept `np.nan` or make the
        # `sharpe_ratio` return type configurable.
        # In the meantime, convert nan values to 0.0
        if pd.isnull(sharpe):
            sharpe = 0.0

        sortino = ep.sortino_ratio(
            algorithm_returns.values,
            _downside_risk=ep.downside_risk(algorithm_returns.values),
        )

        rval = {
            "algorithm_period_return": algorithm_period_returns,
            "benchmark_period_return": benchmark_period_returns,
            "treasury_period_return": 0,
            "excess_return": algorithm_period_returns,
            "alpha": alpha,
            "beta": beta,
            "sharpe": sharpe,
            "sortino": sortino,
            "period_label": end_session.strftime("%Y-%m"),
            "trading_days": len(benchmark_returns),
            "algo_volatility": ep.annual_volatility(algorithm_returns),
            "benchmark_volatility": benchmark_volatility,
            "max_drawdown": ep.max_drawdown(algorithm_returns.values),
            "max_leverage": algorithm_leverages.max(),
        }

        # check if a field in rval is nan or inf, and replace it with None
        # except period_label which is always a str
        return {
            k: (None if k != "period_label" and not np.isfinite(v) else v)
            for k, v in rval.items()
        }
Code example #17
File: test.py Project: no7dw/py-practice
from empyrical import (
    cum_returns,
    annual_return,
    annual_volatility,
    max_drawdown,
    sharpe_ratio,
    sortino_ratio,
    calmar_ratio,
    omega_ratio,
    downside_risk,
    alpha,
    beta,
    tail_ratio
)
import pandas as pd
returns = pd.Series(
    index=pd.date_range('2017-03-10', '2017-03-19'),
    data=(-0.012143, 0.045350, 0.030957, 0.004902, 0.002341, -0.02103, 0.00148, 0.004820, -0.00023, 0.01201)
)

benchmark_returns = pd.Series(
    index=pd.date_range('2017-03-10', '2017-03-19'),
    data=(-0.031940, 0.025350, -0.020957, -0.000902, 0.007341, -0.01103, 0.00248, 0.008820, -0.00123, 0.01091)
)
creturns = cum_returns(returns)
max_drawdown(returns)
annual_return(returns)
annual_volatility(returns, period='daily')
calmar_ratio(returns)
omega_ratio(returns=returns, risk_free=0.01)
sharpe_ratio(returns=returns, risk_free=0.01)
sortino_ratio(returns=returns)
downside_risk(returns=returns)
alpha(returns=returns, factor_returns=benchmark_returns, risk_free=0.01)
beta(returns=returns, factor_returns=benchmark_returns, risk_free=0.01)
tail_ratio(returns=returns)

Code example #18
 def test_downside_risk_trans(self, returns, required_return):
     dr_0 = empyrical.downside_risk(returns, -required_return)
     dr_1 = empyrical.downside_risk(returns, 0)
     dr_2 = empyrical.downside_risk(returns, required_return)
     assert dr_0 <= dr_1
     assert dr_1 <= dr_2
Code example #19
 def getDownsideRisk(self, required_return=0, period='daily', annualization=None):
     return empyrical.downside_risk(self.returns, required_return, period, annualization)
Code example #20
File: metric.py Project: barrygolden/zipline
    def risk_metric_period(cls,
                           start_session,
                           end_session,
                           algorithm_returns,
                           benchmark_returns,
                           algorithm_leverages):
        """
        Creates a dictionary representing the state of the risk report.

        Parameters
        ----------
        start_session : pd.Timestamp
            Start of period (inclusive) to produce metrics on
        end_session : pd.Timestamp
            End of period (inclusive) to produce metrics on
        algorithm_returns : pd.Series(pd.Timestamp -> float)
            Series of algorithm returns as of the end of each session
        benchmark_returns : pd.Series(pd.Timestamp -> float)
            Series of benchmark returns as of the end of each session
        algorithm_leverages : pd.Series(pd.Timestamp -> float)
            Series of algorithm leverages as of the end of each session


        Returns
        -------
        risk_metric : dict[str, any]
            Dict of metrics with fields like:
                {
                    'algorithm_period_return': 0.0,
                    'benchmark_period_return': 0.0,
                    'treasury_period_return': 0,
                    'excess_return': 0.0,
                    'alpha': 0.0,
                    'beta': 0.0,
                    'sharpe': 0.0,
                    'sortino': 0.0,
                    'period_label': '1970-01',
                    'trading_days': 0,
                    'algo_volatility': 0.0,
                    'benchmark_volatility': 0.0,
                    'max_drawdown': 0.0,
                    'max_leverage': 0.0,
                }
        """

        algorithm_returns = algorithm_returns[
            (algorithm_returns.index >= start_session) &
            (algorithm_returns.index <= end_session)
        ]

        # Benchmark needs to be masked to the same dates as the algo returns
        benchmark_returns = benchmark_returns[
            (benchmark_returns.index >= start_session) &
            (benchmark_returns.index <= algorithm_returns.index[-1])
        ]

        benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
        algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]

        alpha, beta = ep.alpha_beta_aligned(
            algorithm_returns.values,
            benchmark_returns.values,
        )

        sharpe = ep.sharpe_ratio(algorithm_returns)

        # The consumer currently expects a 0.0 value for sharpe in period,
        # this differs from cumulative which was np.nan.
        # When factoring out the sharpe_ratio, the different return types
        # were collapsed into `np.nan`.
        # TODO: Either fix consumer to accept `np.nan` or make the
        # `sharpe_ratio` return type configurable.
        # In the meantime, convert nan values to 0.0
        if pd.isnull(sharpe):
            sharpe = 0.0

        sortino = ep.sortino_ratio(
            algorithm_returns.values,
            _downside_risk=ep.downside_risk(algorithm_returns.values),
        )

        rval = {
            'algorithm_period_return': algorithm_period_returns,
            'benchmark_period_return': benchmark_period_returns,
            'treasury_period_return': 0,
            'excess_return': algorithm_period_returns,
            'alpha': alpha,
            'beta': beta,
            'sharpe': sharpe,
            'sortino': sortino,
            'period_label': end_session.strftime("%Y-%m"),
            'trading_days': len(benchmark_returns),
            'algo_volatility': ep.annual_volatility(algorithm_returns),
            'benchmark_volatility': ep.annual_volatility(benchmark_returns),
            'max_drawdown': ep.max_drawdown(algorithm_returns.values),
            'max_leverage': algorithm_leverages.max(),
        }

        # check if a field in rval is nan or inf, and replace it with None
        # except period_label which is always a str
        return {
            k: (
                None
                if k != 'period_label' and not np.isfinite(v) else
                v
            )
            for k, v in iteritems(rval)
        }
Code example #21
File: cumulative.py Project: FranSal/zipline
    def update(self, dt, algorithm_returns, benchmark_returns, leverage):
        # Keep track of latest dt for use in to_dict and other methods
        # that report current state.
        self.latest_dt = dt
        dt_loc = self.cont_index.get_loc(dt)
        self.latest_dt_loc = dt_loc

        self.algorithm_returns_cont[dt_loc] = algorithm_returns
        self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]

        self.num_trading_days = len(self.algorithm_returns)

        if self.create_first_day_stats:
            if len(self.algorithm_returns) == 1:
                self.algorithm_returns = np.append(0.0, self.algorithm_returns)

        self.algorithm_cumulative_returns[dt_loc] = cum_returns(
            self.algorithm_returns
        )[-1]

        algo_cumulative_returns_to_date = \
            self.algorithm_cumulative_returns[:dt_loc + 1]

        self.mean_returns_cont[dt_loc] = \
            algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days

        self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

        self.annualized_mean_returns_cont[dt_loc] = \
            self.mean_returns_cont[dt_loc] * 252

        self.annualized_mean_returns = \
            self.annualized_mean_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.mean_returns) == 1:
                self.mean_returns = np.append(0.0, self.mean_returns)
                self.annualized_mean_returns = np.append(
                    0.0, self.annualized_mean_returns)

        self.benchmark_returns_cont[dt_loc] = benchmark_returns
        self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.benchmark_returns) == 1:
                self.benchmark_returns = np.append(0.0, self.benchmark_returns)

        self.benchmark_cumulative_returns[dt_loc] = cum_returns(
            self.benchmark_returns
        )[-1]

        benchmark_cumulative_returns_to_date = \
            self.benchmark_cumulative_returns[:dt_loc + 1]

        self.mean_benchmark_returns_cont[dt_loc] = \
            benchmark_cumulative_returns_to_date[dt_loc] / \
            self.num_trading_days

        self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc]

        self.annualized_mean_benchmark_returns_cont[dt_loc] = \
            self.mean_benchmark_returns_cont[dt_loc] * 252

        self.annualized_mean_benchmark_returns = \
            self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

        self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
        self.algorithm_cumulative_leverages = \
            self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.algorithm_cumulative_leverages) == 1:
                self.algorithm_cumulative_leverages = np.append(
                    0.0,
                    self.algorithm_cumulative_leverages)

        if not len(self.algorithm_returns) and len(self.benchmark_returns):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
            message = message.format(
                bm_count=len(self.benchmark_returns),
                algo_count=len(self.algorithm_returns),
                start=self.start_session,
                end=self.end_session,
                dt=dt
            )
            raise Exception(message)

        self.update_current_max()
        self.benchmark_volatility[dt_loc] = annual_volatility(
            self.benchmark_returns
        )
        self.algorithm_volatility[dt_loc] = annual_volatility(
            self.algorithm_returns
        )

        # caching the treasury rates for the minutely case is a
        # big speedup, because it avoids searching the treasury
        # curves on every minute.
        # In both minutely and daily, the daily curve is always used.
        treasury_end = dt.replace(hour=0, minute=0)
        if np.isnan(self.daily_treasury[treasury_end]):
            treasury_period_return = choose_treasury(
                self.treasury_curves,
                self.start_session,
                treasury_end,
                self.trading_calendar,
            )
            self.daily_treasury[treasury_end] = treasury_period_return
        self.treasury_period_return = self.daily_treasury[treasury_end]
        self.excess_returns[dt_loc] = (
            self.algorithm_cumulative_returns[dt_loc] -
            self.treasury_period_return)

        self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
            self.algorithm_returns,
            self.benchmark_returns,
        )
        self.sharpe[dt_loc] = sharpe_ratio(
            self.algorithm_returns,
        )
        self.downside_risk[dt_loc] = downside_risk(
            self.algorithm_returns
        )
        self.sortino[dt_loc] = sortino_ratio(
            self.algorithm_returns,
            _downside_risk=self.downside_risk[dt_loc]
        )
        self.information[dt_loc] = information_ratio(
            self.algorithm_returns,
            self.benchmark_returns,
        )
        self.max_drawdown = max_drawdown(
            self.algorithm_returns
        )
        self.max_drawdowns[dt_loc] = self.max_drawdown
        self.max_leverage = self.calculate_max_leverage()
        self.max_leverages[dt_loc] = self.max_leverage
Code example #22
File: period.py Project: FranSal/zipline
    def calculate_metrics(self):
        self.benchmark_period_returns = \
            cum_returns(self.benchmark_returns).iloc[-1]

        self.algorithm_period_returns = \
            cum_returns(self.algorithm_returns).iloc[-1]

        if not self.algorithm_returns.index.equals(
            self.benchmark_returns.index
        ):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
            algorithm_returns ({algo_count}) in range {start} : {end}"
            message = message.format(
                bm_count=len(self.benchmark_returns),
                algo_count=len(self.algorithm_returns),
                start=self._start_session,
                end=self._end_session
            )
            raise Exception(message)

        self.num_trading_days = len(self.benchmark_returns)

        self.mean_algorithm_returns = (
            self.algorithm_returns.cumsum() /
            np.arange(1, self.num_trading_days + 1, dtype=np.float64)
        )

        self.benchmark_volatility = annual_volatility(self.benchmark_returns)
        self.algorithm_volatility = annual_volatility(self.algorithm_returns)

        self.treasury_period_return = choose_treasury(
            self.treasury_curves,
            self._start_session,
            self._end_session,
            self.trading_calendar,
        )
        self.sharpe = sharpe_ratio(
            self.algorithm_returns,
        )
        # The consumer currently expects a 0.0 value for sharpe in period,
        # this differs from cumulative which was np.nan.
        # When factoring out the sharpe_ratio, the different return types
        # were collapsed into `np.nan`.
        # TODO: Either fix consumer to accept `np.nan` or make the
        # `sharpe_ratio` return type configurable.
        # In the meantime, convert nan values to 0.0
        if pd.isnull(self.sharpe):
            self.sharpe = 0.0
        self.downside_risk = downside_risk(
            self.algorithm_returns.values
        )
        self.sortino = sortino_ratio(
            self.algorithm_returns.values,
            _downside_risk=self.downside_risk,
        )
        self.information = information_ratio(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.alpha, self.beta = alpha_beta_aligned(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.excess_return = self.algorithm_period_returns - \
            self.treasury_period_return
        self.max_drawdown = max_drawdown(self.algorithm_returns.values)
        self.max_leverage = self.calculate_max_leverage()
Code example #23
    def update(self, dt, algorithm_returns, benchmark_returns, leverage):
        # Keep track of latest dt for use in to_dict and other methods
        # that report current state.
        self.latest_dt = dt
        dt_loc = self.cont_index.get_loc(dt)
        self.latest_dt_loc = dt_loc

        self.algorithm_returns_cont[dt_loc] = algorithm_returns
        self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]

        self.num_trading_days = len(self.algorithm_returns)

        if self.create_first_day_stats:
            if len(self.algorithm_returns) == 1:
                self.algorithm_returns = np.append(0.0, self.algorithm_returns)

        self.algorithm_cumulative_returns[dt_loc] = cum_returns(
            self.algorithm_returns)[-1]

        algo_cumulative_returns_to_date = \
            self.algorithm_cumulative_returns[:dt_loc + 1]

        self.mean_returns_cont[dt_loc] = \
            algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days

        self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

        self.annualized_mean_returns_cont[dt_loc] = \
            self.mean_returns_cont[dt_loc] * 252

        self.annualized_mean_returns = \
            self.annualized_mean_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.mean_returns) == 1:
                self.mean_returns = np.append(0.0, self.mean_returns)
                self.annualized_mean_returns = np.append(
                    0.0, self.annualized_mean_returns)

        self.benchmark_returns_cont[dt_loc] = benchmark_returns
        self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.benchmark_returns) == 1:
                self.benchmark_returns = np.append(0.0, self.benchmark_returns)

        self.benchmark_cumulative_returns[dt_loc] = cum_returns(
            self.benchmark_returns)[-1]

        benchmark_cumulative_returns_to_date = \
            self.benchmark_cumulative_returns[:dt_loc + 1]

        self.mean_benchmark_returns_cont[dt_loc] = \
            benchmark_cumulative_returns_to_date[dt_loc] / \
            self.num_trading_days

        self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc]

        self.annualized_mean_benchmark_returns_cont[dt_loc] = \
            self.mean_benchmark_returns_cont[dt_loc] * 252

        self.annualized_mean_benchmark_returns = \
            self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

        self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
        self.algorithm_cumulative_leverages = \
            self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.algorithm_cumulative_leverages) == 1:
                self.algorithm_cumulative_leverages = np.append(
                    0.0, self.algorithm_cumulative_leverages)

        if not len(self.algorithm_returns) and len(self.benchmark_returns):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"

            message = message.format(bm_count=len(self.benchmark_returns),
                                     algo_count=len(self.algorithm_returns),
                                     start=self.start_session,
                                     end=self.end_session,
                                     dt=dt)
            raise Exception(message)

        self.update_current_max()
        self.benchmark_volatility[dt_loc] = annual_volatility(
            self.benchmark_returns)
        self.algorithm_volatility[dt_loc] = annual_volatility(
            self.algorithm_returns)

        # caching the treasury rates for the minutely case is a
        # big speedup, because it avoids searching the treasury
        # curves on every minute.
        # In both minutely and daily, the daily curve is always used.
        treasury_end = dt.replace(hour=0, minute=0)
        if np.isnan(self.daily_treasury[treasury_end]):
            treasury_period_return = choose_treasury(
                self.treasury_curves,
                self.start_session,
                treasury_end,
                self.trading_calendar,
            )
            self.daily_treasury[treasury_end] = treasury_period_return
        self.treasury_period_return = self.daily_treasury[treasury_end]
        self.excess_returns[dt_loc] = (
            self.algorithm_cumulative_returns[dt_loc] -
            self.treasury_period_return)

        self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
            self.algorithm_returns,
            self.benchmark_returns,
        )
        self.sharpe[dt_loc] = sharpe_ratio(self.algorithm_returns, )
        self.downside_risk[dt_loc] = downside_risk(self.algorithm_returns)
        self.sortino[dt_loc] = sortino_ratio(
            self.algorithm_returns, _downside_risk=self.downside_risk[dt_loc])
        self.max_drawdown = max_drawdown(self.algorithm_returns)
        self.max_drawdowns[dt_loc] = self.max_drawdown
        self.max_leverage = self.calculate_max_leverage()
        self.max_leverages[dt_loc] = self.max_leverage
Code example #24
File: backtesting.py Project: gcoinman/vnpy
    def calculate_statistics(self, df: DataFrame = None, output=True):
        """"""
        self.output("开始计算策略统计指标")

        # Check DataFrame input exterior
        if df is None:
            df = self.daily_df

        # Check for init DataFrame
        if df is None:
            # Set all statistics to 0 if no trade.
            start_date = ""
            end_date = ""
            total_days = 0
            profit_days = 0
            loss_days = 0
            end_balance = 0
            max_drawdown = 0
            max_ddpercent = 0
            max_drawdown_duration = 0
            max_drawdown_end = 0
            total_net_pnl = 0
            daily_net_pnl = 0
            total_commission = 0
            daily_commission = 0
            total_slippage = 0
            daily_slippage = 0
            total_turnover = 0
            daily_turnover = 0
            total_trade_count = 0
            daily_trade_count = 0
            total_return = 0
            annual_return = 0
            daily_return = 0
            return_std = 0
            sharpe_ratio = 0
            sortino_info = 0
            win_ratio = 0
            return_drawdown_ratio = 0
            tail_ratio_info = 0
            stability_return = 0
            win_loss_pnl_ratio = 0
            pnl_medio = 0
            duration_medio = 0
            calmar_ratio = 0
        else:
            # Calculate balance related time series data
            df["balance"] = df["net_pnl"].cumsum() + self.capital
            df["return"] = np.log(df["balance"] /
                                  df["balance"].shift(1)).fillna(0)
            df["highlevel"] = (df["balance"].rolling(min_periods=1,
                                                     window=len(df),
                                                     center=False).max())
            df["drawdown"] = df["balance"] - df["highlevel"]
            df["ddpercent"] = df["drawdown"] / df["highlevel"] * 100

            # Calculate statistics value
            start_date = df.index[0]
            end_date = df.index[-1]

            total_days = len(df)
            profit_days = len(df[df["net_pnl"] > 0])
            loss_days = len(df[df["net_pnl"] < 0])

            end_balance = df["balance"].iloc[-1]
            max_drawdown = df["drawdown"].min()
            max_ddpercent = df["ddpercent"].min()
            max_drawdown_end = df["drawdown"].idxmin()

            if isinstance(max_drawdown_end, date):
                max_drawdown_start = df["balance"][:max_drawdown_end].idxmax()
                max_drawdown_duration = (max_drawdown_end -
                                         max_drawdown_start).days
            else:
                max_drawdown_duration = 0

            total_net_pnl = df["net_pnl"].sum()
            daily_net_pnl = total_net_pnl / total_days

            win = df[df["net_pnl"] > 0]
            win_amount = win["net_pnl"].sum()
            win_pnl_medio = win["net_pnl"].mean()
            # win_duration_medio = win["duration"].mean().total_seconds()/3600
            win_count = win["trade_count"].sum()
            pnl_medio = df["net_pnl"].mean()
            # duration_medio = df["duration"].mean().total_seconds()/3600

            loss = df[df["net_pnl"] < 0]
            loss_amount = loss["net_pnl"].sum()
            loss_pnl_medio = loss["net_pnl"].mean()
            # loss_duration_medio = loss["duration"].mean().total_seconds()/3600

            total_commission = df["commission"].sum()
            daily_commission = total_commission / total_days

            total_slippage = df["slippage"].sum()
            daily_slippage = total_slippage / total_days

            total_turnover = df["turnover"].sum()
            daily_turnover = total_turnover / total_days

            total_trade_count = df["trade_count"].sum()
            win_ratio = (win_count / total_trade_count) * 100
            win_loss_pnl_ratio = -win_pnl_medio / loss_pnl_medio
            daily_trade_count = total_trade_count / total_days

            total_return = (end_balance / self.capital - 1) * 100
            annual_return = total_return / total_days * 240
            daily_return = df["return"].mean() * 100
            return_std = df["return"].std() * 100

            if return_std:
                sharpe_ratio = daily_return / return_std * np.sqrt(240)
            else:
                sharpe_ratio = 0

            return_drawdown_ratio = -total_return / max_ddpercent

            # calmar_ratio: ratio of annualized return to the maximum historical drawdown
            calmar_ratio = annual_return / abs(max_ddpercent)

            #sortino_info
            sortino_info = sortino_ratio(df['return'])
            omega_info = omega_ratio(df['return'])
            # Annualized volatility
            annual_volatility_info = annual_volatility(df['return'])
            # Compound annual growth rate
            cagr_info = cagr(df['return'])
            # Annualized downside risk
            annual_downside_risk = downside_risk(df['return'])
            """CVaR即条件风险价值,其含义为在投资组合的损失超过某个给定VaR值的条件下,该投资组合的平均损失值。"""
            c_var = conditional_value_at_risk(df['return'])
            """风险价值(VaR)是对投资损失风险的一种度量。它估计在正常的市场条件下,在设定的时间段(例如一天)中,
            一组投资可能(以给定的概率)损失多少。金融业中的公司和监管机构通常使用VaR来衡量弥补可能损失所需的资产数量"""
            var_info = value_at_risk(df['return'])

            # Stability of returns
            stability_return = stability_of_timeseries(df['return'])
            # Tail ratio: 0.25 == 1/4, i.e. 1 unit of gain against 4 units of risk
            tail_ratio_info = tail_ratio(df['return'])

        # Output
        if output:
            self.output("-" * 30)
            self.output(f"首个交易日:\t{start_date}")
            self.output(f"最后交易日:\t{end_date}")

            self.output(f"总交易日:\t{total_days}")
            self.output(f"盈利交易日:\t{profit_days}")
            self.output(f"亏损交易日:\t{loss_days}")

            self.output(f"起始资金:\t{self.capital:,.2f}")
            self.output(f"结束资金:\t{end_balance:,.2f}")

            self.output(f"总收益率:\t{total_return:,.2f}%")
            self.output(f"年化收益:\t{annual_return:,.2f}%")
            self.output(f"最大回撤: \t{max_drawdown:,.2f}")
            self.output(f"百分比最大回撤: {max_ddpercent:,.2f}%")
            self.output(f"最长回撤天数: \t{max_drawdown_duration}")

            self.output(f"总盈亏:\t{total_net_pnl:,.2f}")
            self.output(f"总手续费:\t{total_commission:,.2f}")
            self.output(f"总滑点:\t{total_slippage:,.2f}")
            self.output(f"总成交金额:\t{total_turnover:,.2f}")
            self.output(f"总成交笔数:\t{total_trade_count}")

            self.output(f"日均盈亏:\t{daily_net_pnl:,.2f}")
            self.output(f"日均手续费:\t{daily_commission:,.2f}")
            self.output(f"日均滑点:\t{daily_slippage:,.2f}")
            self.output(f"日均成交金额:\t{daily_turnover:,.2f}")
            self.output(f"日均成交笔数:\t{daily_trade_count}")

            self.output(f"日均收益率:\t{daily_return:,.2f}%")
            self.output(f"收益标准差:\t{return_std:,.2f}%")
            self.output(f"胜率:\t{win_ratio:,.2f}")
            self.output(f"盈亏比:\t\t{win_loss_pnl_ratio:,.2f}")

            self.output(f"平均每笔盈亏:\t{pnl_medio:,.2f}")
            self.output(f"calmar_ratio:\t{calmar_ratio:,.3f}")
            # self.output(f"平均持仓小时:\t{duration_medio:,.2f}")
            self.output(f"Sharpe Ratio:\t{sharpe_ratio:,.2f}")
            self.output(f"sortino Ratio:\t{sortino_info:,.3f}")
            self.output(f"收益回撤比:\t{return_drawdown_ratio:,.2f}")

        statistics = {
            "start_date": start_date,
            "end_date": end_date,
            "total_days": total_days,
            "profit_days": profit_days,
            "loss_days": loss_days,
            "capital": self.capital,
            "end_balance": end_balance,
            "max_drawdown": max_drawdown,
            "max_ddpercent": max_ddpercent,
            "max_drawdown_end": max_drawdown_end,
            "max_drawdown_duration": max_drawdown_duration,
            "total_net_pnl": total_net_pnl,
            "daily_net_pnl": daily_net_pnl,
            "total_commission": total_commission,
            "daily_commission": daily_commission,
            "total_slippage": total_slippage,
            "daily_slippage": daily_slippage,
            "total_turnover": total_turnover,
            "daily_turnover": daily_turnover,
            "total_trade_count": total_trade_count,
            "daily_trade_count": daily_trade_count,
            "total_return": total_return,
            "annual_return": annual_return,
            "daily_return": daily_return,
            "return_std": return_std,
            "sharpe_ratio": sharpe_ratio,
            'sortino_info': sortino_info,
            "win_ratio": win_ratio,
            "return_drawdown_ratio": return_drawdown_ratio,
            "tail_ratio_info": tail_ratio_info,
            "stability_return": stability_return,
            "win_loss_pnl_ratio": win_loss_pnl_ratio,
            "pnl_medio": pnl_medio,
            "calmar_ratio": calmar_ratio
        }

        # Filter potential error infinite value
        for key, value in statistics.items():
            if value in (np.inf, -np.inf):
                value = 0
            statistics[key] = np.nan_to_num(value)

        self.output("策略统计指标计算完成")
        return statistics
Code example #25
model_price.to(device)

trading_threshold = 0.6

with torch.no_grad():
    print('Threshold: ', trading_threshold)
    profits = []
    for i in tqdm(range(len(testdata))):
        embedding = testdata[i]["embedding"].to(device).unsqueeze(0)
        length_data = testdata[i]["length_data"].to(device).unsqueeze(0)
        time_feats = testdata[i]["time_feature"].to(device).squeeze(
            -1).unsqueeze(0)

        cp_last = testdata_1[i]["cp_last"]
        cp_target = testdata_1[i]["cp_target"]

        outputs_price, _ = model_price(embedding, length_data, time_feats)
        outputs_probs = torch.nn.functional.softmax(outputs_price,
                                                    dim=-1).squeeze(0)
        if (torch.max(outputs_probs) > trading_threshold):
            if torch.argmax(outputs_price) == 1:
                profits.append(cp_target - cp_last)
            else:
                profits.append(cp_last - cp_target)

    profits = np.array(profits)
    sortino = sortino_ratio(profits)
    downside = downside_risk(profits)
    print("Downside Risk: ", downside)
    print("Sortino Ratio: ", sortino)