def __init__(self, wrapper, records_arr, close, idx_field='exit_idx', trade_type=TradeType.Trade, **kwargs):
    """Initialize a trades/positions records container.

    Args:
        wrapper: Array wrapper carrying the index/columns metadata of the data.
        records_arr: Structured NumPy array of records; must contain all fields
            of `trade_dt` (or `position_dt` when `trade_type` is a position).
        close (array_like): Reference close price; broadcast to the wrapper's
            per-column shape below.
        idx_field (str): Record field used as the index of each record.
        trade_type: `TradeType.Trade` or a position type; selects which dtype
            the records array is validated against.
        **kwargs: Forwarded to `Records.__init__`.

    Raises:
        TypeError: If `records_arr` is missing any required dtype field.
    """
    Records.__init__(self, wrapper, records_arr, idx_field=idx_field, close=close, trade_type=trade_type, **kwargs)
    # Broadcast close to the ungrouped (per-column) shape for later lookups
    self._close = broadcast_to(close, wrapper.dummy(group_by=False))
    self._trade_type = trade_type
    # Validate the structured array carries every field the chosen dtype requires
    if trade_type == TradeType.Trade:
        if not all(field in records_arr.dtype.names for field in trade_dt.names):
            raise TypeError("Records array must match trade_dt")
    else:
        if not all(field in records_arr.dtype.names for field in position_dt.names):
            raise TypeError("Records array must match position_dt")
def rolling_down_capture(self, window, benchmark_rets, minp=None, wrap_kwargs=None):
    """Rolling version of `ReturnsAccessor.down_capture`.

    `benchmark_rets` is broadcast against this object before the computation.
    """
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    bm_arr = reshape_fns.to_2d(benchmark_rets, raw=True)
    benchmark_rets = reshape_fns.broadcast_to(bm_arr, obj_arr)
    out = nb.rolling_down_capture_nb(
        self.to_2d_array(), window, minp, benchmark_rets, self.ann_factor)
    wrap_kwargs = merge_dicts({}, wrap_kwargs)
    return self.wrapper.wrap(out, **wrap_kwargs)
def rolling_information_ratio(self, window, benchmark_rets, minp=None, ddof=1, wrap_kwargs=None):
    """Rolling version of `ReturnsAccessor.information_ratio`.

    `benchmark_rets` is broadcast against this object before the computation.
    """
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    bm_arr = reshape_fns.to_2d(benchmark_rets, raw=True)
    benchmark_rets = reshape_fns.broadcast_to(bm_arr, obj_arr)
    out = nb.rolling_information_ratio_nb(
        self.to_2d_array(), window, minp, benchmark_rets, ddof)
    wrap_kwargs = merge_dicts({}, wrap_kwargs)
    return self.wrapper.wrap(out, **wrap_kwargs)
def beta(self, factor_returns):
    """Beta.

    Args:
        factor_returns (array_like): Benchmark return to compare returns against. Will broadcast."""
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    factor_returns = reshape_fns.broadcast_to(
        reshape_fns.to_2d(factor_returns, raw=True), obj_arr)
    out = nb.beta_nb(self.to_2d_array(), factor_returns)
    return self.wrap_reduced(out)
def down_capture(self, factor_returns):
    """Capture ratio for periods when the benchmark return is negative.

    Args:
        factor_returns (array_like): Benchmark return to compare returns against. Will broadcast."""
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    factor_returns = reshape_fns.broadcast_to(
        reshape_fns.to_2d(factor_returns, raw=True), obj_arr)
    out = nb.down_capture_nb(self.to_2d_array(), factor_returns, self.ann_factor)
    return self.wrap_reduced(out)
def information_ratio(self, factor_returns):
    """Information ratio of a strategy.

    Args:
        factor_returns (array_like): Benchmark return to compare returns against. Will broadcast."""
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    factor_returns = reshape_fns.broadcast_to(
        reshape_fns.to_2d(factor_returns, raw=True), obj_arr)
    out = nb.information_ratio_nb(self.to_2d_array(), factor_returns)
    return self.wrap_reduced(out)
def alpha(self, factor_returns, risk_free=0.):
    """Annualized alpha.

    Args:
        factor_returns (array_like): Benchmark return to compare returns against. Will broadcast.
        risk_free (float or array_like): Constant risk-free return throughout the period."""
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    factor_returns = reshape_fns.broadcast_to(
        reshape_fns.to_2d(factor_returns, raw=True), obj_arr)
    # One risk-free rate per column
    risk_free = np.broadcast_to(risk_free, (len(self.columns),))
    out = nb.alpha_nb(self.to_2d_array(), factor_returns, self.ann_factor, risk_free)
    return self.wrap_reduced(out)
def capture(self, benchmark_rets):
    """Capture ratio.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast per element."""
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    benchmark_rets = reshape_fns.broadcast_to(
        reshape_fns.to_2d(benchmark_rets, raw=True), obj_arr)
    out = nb.capture_nb(self.to_2d_array(), benchmark_rets, self.ann_factor)
    return self.wrapper.wrap_reduced(out)
def beta(self, benchmark_rets):
    """Beta.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast per element."""
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    benchmark_rets = reshape_fns.broadcast_to(
        reshape_fns.to_2d(benchmark_rets, raw=True), obj_arr)
    out = nb.beta_nb(self.to_2d_array(), benchmark_rets)
    return self.wrapper.wrap_reduced(out)
def information_ratio(self, benchmark_rets, ddof=1, wrap_kwargs=None):
    """Information ratio of a strategy.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast."""
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    benchmark_rets = reshape_fns.broadcast_to(
        reshape_fns.to_2d(benchmark_rets, raw=True), obj_arr)
    out = nb.information_ratio_nb(self.to_2d_array(), benchmark_rets, ddof)
    wrap_kwargs = merge_dicts(dict(name_or_index='information_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def information_ratio(self, benchmark_rets):
    """Information ratio of a strategy.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast per element."""
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    benchmark_rets = reshape_fns.broadcast_to(
        reshape_fns.to_2d(benchmark_rets, raw=True), obj_arr)
    out = nb.information_ratio_nb(self.to_2d_array(), benchmark_rets)
    return self.wrapper.wrap_reduced(out)
def down_capture(self, benchmark_rets, wrap_kwargs=None):
    """Capture ratio for periods when the benchmark return is negative.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast."""
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    benchmark_rets = reshape_fns.broadcast_to(
        reshape_fns.to_2d(benchmark_rets, raw=True), obj_arr)
    out = nb.down_capture_nb(self.to_2d_array(), benchmark_rets, self.ann_factor)
    wrap_kwargs = merge_dicts(dict(name_or_index='down_capture'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def __init__(self, wrapper, records_arr, close, idx_field='idx', **kwargs):
    """Initialize an orders records container.

    Args:
        wrapper: Array wrapper carrying the index/columns metadata of the data.
        records_arr: Structured NumPy array of order records; must contain all
            fields of `order_dt`.
        close (array_like): Reference close price; broadcast to the wrapper's
            per-column shape below.
        idx_field (str): Record field used as the index of each record.
        **kwargs: Forwarded to `Records.__init__`.

    Raises:
        TypeError: If `records_arr` is missing any field of `order_dt`.
    """
    Records.__init__(self, wrapper, records_arr, idx_field=idx_field, close=close, **kwargs)
    # Broadcast close to the ungrouped (per-column) shape for later lookups
    self._close = broadcast_to(close, wrapper.dummy(group_by=False))
    if not all(field in records_arr.dtype.names for field in order_dt.names):
        raise TypeError("Records array must match order_dt")
def beta(self, benchmark_rets, wrap_kwargs=None):
    """Beta.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast."""
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    benchmark_rets = reshape_fns.broadcast_to(
        reshape_fns.to_2d(benchmark_rets, raw=True), obj_arr)
    out = nb.beta_nb(self.to_2d_array(), benchmark_rets)
    wrap_kwargs = merge_dicts(dict(name_or_index='beta'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def rolling_capture(self, window: int, benchmark_rets: tp.ArrayLike, minp: tp.Optional[int] = None,
                    wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Rolling version of `ReturnsAccessor.capture`."""
    obj_arr = to_2d(self._obj, raw=True)
    benchmark_rets = broadcast_to(to_2d(benchmark_rets, raw=True), obj_arr)
    wrap_kwargs = merge_dicts({}, wrap_kwargs)
    out = nb.rolling_capture_nb(self.to_2d_array(), window, minp, benchmark_rets, self.ann_factor)
    return self.wrapper.wrap(out, **wrap_kwargs)
def beta(self, benchmark_rets: tp.ArrayLike, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Beta.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast."""
    obj_arr = to_2d(self._obj, raw=True)
    benchmark_rets = broadcast_to(to_2d(benchmark_rets, raw=True), obj_arr)
    wrap_kwargs = merge_dicts(dict(name_or_index='beta'), wrap_kwargs)
    out = nb.beta_nb(self.to_2d_array(), benchmark_rets)
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def indexing_on_mapper(mapper, ref_obj, pd_indexing_func):
    """Broadcast `mapper` Series to `ref_obj` and perform pandas indexing using `pd_indexing_func`.

    A range helper is indexed in place of the mapper so that the positions
    surviving `pd_indexing_func` can be mapped back to `mapper` entries.

    Returns:
        A new mapper Series aligned with the indexing result, or None when the
        result of `pd_indexing_func` is neither a DataFrame nor a Series.
    """
    checks.assert_type(mapper, pd.Series)
    checks.assert_type(ref_obj, (pd.Series, pd.DataFrame))

    # Broadcast a positional range instead of the mapper itself, then index it
    # to learn which mapper positions survive the operation
    df_range_mapper = reshape_fns.broadcast_to(np.arange(len(mapper.index)), ref_obj)
    loced_range_mapper = pd_indexing_func(df_range_mapper)
    new_mapper = mapper.iloc[loced_range_mapper.values[0]]
    if checks.is_frame(loced_range_mapper):
        return pd.Series(new_mapper.values, index=loced_range_mapper.columns, name=mapper.name)
    elif checks.is_series(loced_range_mapper):
        return pd.Series([new_mapper], index=[loced_range_mapper.name], name=mapper.name)
    # Explicit fall-through (was an implicit None); consistent with the typed variant
    return None
def alpha(self, benchmark_rets, risk_free=0., wrap_kwargs=None):
    """Annualized alpha.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast.
        risk_free (float or array_like): Constant risk-free return throughout the period."""
    obj_arr = reshape_fns.to_2d(self._obj, raw=True)
    benchmark_rets = reshape_fns.broadcast_to(
        reshape_fns.to_2d(benchmark_rets, raw=True), obj_arr)
    out = nb.alpha_nb(self.to_2d_array(), benchmark_rets, self.ann_factor, risk_free)
    wrap_kwargs = merge_dicts(dict(name_or_index='alpha'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def down_capture(self, benchmark_rets: tp.ArrayLike, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Capture ratio for periods when the benchmark return is negative.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast."""
    obj_arr = to_2d(self._obj, raw=True)
    benchmark_rets = broadcast_to(to_2d(benchmark_rets, raw=True), obj_arr)
    wrap_kwargs = merge_dicts(dict(name_or_index='down_capture'), wrap_kwargs)
    out = nb.down_capture_nb(self.to_2d_array(), benchmark_rets, self.ann_factor)
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def rolling_information_ratio(self, window: int, benchmark_rets: tp.ArrayLike,
                              minp: tp.Optional[int] = None, ddof: int = 1,
                              wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Rolling version of `ReturnsAccessor.information_ratio`."""
    obj_arr = to_2d(self._obj, raw=True)
    benchmark_rets = broadcast_to(to_2d(benchmark_rets, raw=True), obj_arr)
    out = nb.rolling_information_ratio_nb(self.to_2d_array(), window, minp, benchmark_rets, ddof)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def indexing_on_mapper(mapper: tp.Series, ref_obj: tp.SeriesFrame,
                       pd_indexing_func: tp.Callable) -> tp.Optional[tp.Series]:
    """Broadcast `mapper` Series to `ref_obj` and perform pandas indexing using `pd_indexing_func`."""
    checks.assert_instance_of(mapper, pd.Series)
    checks.assert_instance_of(ref_obj, (pd.Series, pd.DataFrame))

    # Index a broadcast positional range to learn which mapper positions survive
    range_obj = reshape_fns.broadcast_to(np.arange(len(mapper.index)), ref_obj)
    loced = pd_indexing_func(range_obj)
    selected = mapper.iloc[loced.values[0]]
    if checks.is_frame(loced):
        return pd.Series(selected.values, index=loced.columns, name=mapper.name)
    if checks.is_series(loced):
        return pd.Series([selected], index=[loced.name], name=mapper.name)
    return None
def alpha(self, benchmark_rets: tp.ArrayLike, risk_free: float = 0.,
          wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Annualized alpha.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast.
        risk_free (float): Constant risk-free return throughout the period."""
    obj_arr = to_2d(self._obj, raw=True)
    benchmark_rets = broadcast_to(to_2d(benchmark_rets, raw=True), obj_arr)
    wrap_kwargs = merge_dicts(dict(name_or_index='alpha'), wrap_kwargs)
    out = nb.alpha_nb(self.to_2d_array(), benchmark_rets, self.ann_factor, risk_free)
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def information_ratio(self, benchmark_rets: tp.ArrayLike, ddof: int = 1,
                      wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Information ratio of a strategy.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast."""
    obj_arr = to_2d(self._obj, raw=True)
    benchmark_rets = broadcast_to(to_2d(benchmark_rets, raw=True), obj_arr)
    wrap_kwargs = merge_dicts(dict(name_or_index='information_ratio'), wrap_kwargs)
    out = nb.information_ratio_nb(self.to_2d_array(), benchmark_rets, ddof)
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def __init__(self, wrapper: ArrayWrapper, records_arr: tp.RecordArray, ts: tp.ArrayLike,
             idx_field: str = 'end_idx', **kwargs) -> None:
    """Initialize a drawdowns records container.

    Args:
        wrapper: Array wrapper carrying the index/columns metadata of the data.
        records_arr: Structured NumPy array of drawdown records; must contain
            all fields of `drawdown_dt`.
        ts (array_like): Reference time series; broadcast to the wrapper's
            per-column shape below.
        idx_field (str): Record field used as the index of each record.
        **kwargs: Forwarded to `Records.__init__`.

    Raises:
        TypeError: If `records_arr` is missing any field of `drawdown_dt`.
    """
    Records.__init__(self, wrapper, records_arr, idx_field=idx_field, ts=ts, **kwargs)
    # Broadcast ts to the ungrouped (per-column) shape for later lookups
    self._ts = broadcast_to(ts, wrapper.dummy(group_by=False))
    if not all(field in records_arr.dtype.names for field in drawdown_dt.names):
        raise TypeError("Records array must match drawdown_dt")
def _indexing_func(obj, pd_indexing_func):
    """Perform indexing on `Portfolio`.

    Applies `pd_indexing_func` column-wise to every array the portfolio holds
    and returns a new `Portfolio` built from the selected columns. Changing
    the time axis is not supported.
    """
    if obj.wrapper.ndim == 1:
        raise TypeError("Indexing on Series is not supported")

    n_rows = len(obj.wrapper.index)
    n_cols = len(obj.wrapper.columns)
    # Index a frame of column positions to learn which columns were selected
    col_mapper = obj.wrapper.wrap(np.broadcast_to(np.arange(n_cols), (n_rows, n_cols)))
    col_mapper = pd_indexing_func(col_mapper)
    if not pd.Index.equals(col_mapper.index, obj.wrapper.index):
        raise NotImplementedError("Changing index (time axis) is not supported")
    # First row suffices: every row carries the same column positions
    new_cols = col_mapper.values[0]

    # Array-like params
    def index_arraylike_param(param):
        # Scalars pass through; per-column arrays are aligned to axis 1 and sliced
        if np.asarray(param).ndim > 0:
            param = reshape_fns.broadcast_to_axis_of(param, obj.main_price, 1)
            param = param[new_cols]
        return param

    factor_returns = obj.factor_returns
    if factor_returns is not None:
        # Broadcast frame-shaped benchmark returns before slicing them the same way
        if checks.is_frame(factor_returns):
            factor_returns = reshape_fns.broadcast_to(factor_returns, obj.main_price)
        factor_returns = pd_indexing_func(factor_returns)

    # Create new Portfolio instance
    return obj.__class__(
        pd_indexing_func(obj.main_price),
        obj.init_capital.iloc[new_cols],
        pd_indexing_func(obj.orders),  # Orders class supports indexing
        pd_indexing_func(obj.cash),
        pd_indexing_func(obj.shares),
        freq=obj.freq,
        year_freq=obj.year_freq,
        levy_alpha=index_arraylike_param(obj.levy_alpha),
        risk_free=index_arraylike_param(obj.risk_free),
        required_return=index_arraylike_param(obj.required_return),
        cutoff=index_arraylike_param(obj.cutoff),
        factor_returns=factor_returns,
        incl_unrealized_stats=obj.incl_unrealized_stats)
def broadcast_to(self, other, **kwargs):
    """See `vectorbt.base.reshape_fns.broadcast_to`."""
    target = other._obj if isinstance(other, Base_Accessor) else other
    return reshape_fns.broadcast_to(self._obj, target, **kwargs)
def plot_cum_returns(self, benchmark_rets: tp.Optional[tp.ArrayLike] = None, start_value: float = 1,
                     fill_to_benchmark: bool = False, main_kwargs: tp.KwargsLike = None,
                     benchmark_kwargs: tp.KwargsLike = None, hline_shape_kwargs: tp.KwargsLike = None,
                     add_trace_kwargs: tp.KwargsLike = None, xref: str = 'x', yref: str = 'y',
                     fig: tp.Optional[tp.BaseFigure] = None, **layout_kwargs) -> tp.BaseFigure:  # pragma: no cover
    """Plot cumulative returns.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against.
            Will broadcast per element.
        start_value (float): The starting returns.
        fill_to_benchmark (bool): Whether to fill between main and benchmark,
            or between main and `start_value`.
        main_kwargs (dict): Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot` for main.
        benchmark_kwargs (dict): Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot` for benchmark.
        hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for `start_value` line.
        add_trace_kwargs (dict): Keyword arguments passed to `add_trace`.
        xref (str): X coordinate axis.
        yref (str): Y coordinate axis.
        fig (Figure or FigureWidget): Figure to add traces to.
        **layout_kwargs: Keyword arguments for layout.

    ## Example

    ```python-repl
    >>> import pandas as pd
    >>> import numpy as np

    >>> np.random.seed(0)
    >>> rets = pd.Series(np.random.uniform(-0.05, 0.05, size=100))
    >>> benchmark_rets = pd.Series(np.random.uniform(-0.05, 0.05, size=100))
    >>> rets.vbt.returns.plot_cum_returns(benchmark_rets=benchmark_rets)
    ```

    ![](/vectorbt/docs/img/plot_cum_returns.svg)
    """
    from vectorbt._settings import settings
    plotting_cfg = settings['plotting']

    if fig is None:
        fig = make_figure()
    fig.update_layout(**layout_kwargs)
    # Resolve the horizontal extent of the target x-axis (defaults to full width)
    x_domain = [0, 1]
    xaxis = 'xaxis' + xref[1:]
    if xaxis in fig.layout:
        if 'domain' in fig.layout[xaxis]:
            if fig.layout[xaxis]['domain'] is not None:
                x_domain = fig.layout[xaxis]['domain']
    # Can only fill to benchmark if a benchmark was actually given
    fill_to_benchmark = fill_to_benchmark and benchmark_rets is not None

    if benchmark_rets is not None:
        # Plot benchmark
        benchmark_rets = broadcast_to(benchmark_rets, self._obj)
        if benchmark_kwargs is None:
            benchmark_kwargs = {}
        benchmark_kwargs = merge_dicts(dict(
            trace_kwargs=dict(
                line_color=plotting_cfg['color_schema']['gray'],
                name='Benchmark'
            )
        ), benchmark_kwargs)
        benchmark_cumrets = benchmark_rets.vbt.returns.cumulative(start_value=start_value)
        benchmark_cumrets.vbt.plot(**benchmark_kwargs, add_trace_kwargs=add_trace_kwargs, fig=fig)
    else:
        benchmark_cumrets = None

    # Plot main
    if main_kwargs is None:
        main_kwargs = {}
    main_kwargs = merge_dicts(dict(
        trace_kwargs=dict(
            line_color=plotting_cfg['color_schema']['purple'],
        ),
        other_trace_kwargs='hidden'
    ), main_kwargs)
    cumrets = self.cumulative(start_value=start_value)
    if fill_to_benchmark:
        cumrets.vbt.plot_against(benchmark_cumrets, **main_kwargs, add_trace_kwargs=add_trace_kwargs, fig=fig)
    else:
        cumrets.vbt.plot_against(start_value, **main_kwargs, add_trace_kwargs=add_trace_kwargs, fig=fig)

    # Plot hline
    if hline_shape_kwargs is None:
        hline_shape_kwargs = {}
    fig.add_shape(**merge_dicts(dict(
        type='line',
        xref="paper",
        yref=yref,
        x0=x_domain[0],
        y0=start_value,
        x1=x_domain[1],
        y1=start_value,
        line=dict(
            color="gray",
            dash="dash",
        )
    ), hline_shape_kwargs))
    return fig
def stats(self, benchmark_rets: tp.ArrayLike, levy_alpha: float = 2.0, risk_free: float = 0.,
          required_return: float = 0., wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute various statistics on these returns.

    Args:
        benchmark_rets (array_like): Benchmark return to compare returns against.
            Will broadcast per element.
        levy_alpha (float): Scaling relation (Levy stability exponent). Will broadcast per column.
        risk_free (float): Constant risk-free return throughout the period. Will broadcast per column.
        required_return (float): Minimum acceptance return of the investor. Will broadcast per column.
        wrap_kwargs (dict): Keyword arguments used when reducing a Series result.

    ## Example

    ```python-repl
    >>> import pandas as pd
    >>> from datetime import datetime
    >>> import vectorbt as vbt

    >>> symbols = ["BTC-USD", "SPY"]
    >>> price = vbt.YFData.download(symbols, missing_index='drop').get('Close')
    >>> returns = price.pct_change()
    >>> returns["BTC-USD"].vbt.returns(freq='D').stats(returns["SPY"])
    Start                   2014-09-17 00:00:00
    End                     2021-03-12 00:00:00
    Duration                1629 days 00:00:00
    Total Return [%]                    12296.6
    Benchmark Return [%]                122.857
    Annual Return [%]                   194.465
    Annual Volatility [%]               88.4466
    Sharpe Ratio                        1.66841
    Calmar Ratio                        2.34193
    Max. Drawdown [%]                  -83.0363
    Omega Ratio                         1.31107
    Sortino Ratio                       2.54018
    Skew                              0.0101324
    Kurtosis                             6.6453
    Tail Ratio                          1.19828
    Common Sense Ratio                   3.5285
    Value at Risk                    -0.0664826
    Alpha                               2.90175
    Beta                               0.548808
    Name: BTC-USD, dtype: object
    ```
    """
    # Run stats
    benchmark_rets = broadcast_to(benchmark_rets, self._obj)
    # One row of statistics per column of the accessor's object
    stats_df = pd.DataFrame({
        'Start': self.wrapper.index[0],
        'End': self.wrapper.index[-1],
        'Duration': self.wrapper.shape[0] * self.wrapper.freq,
        'Total Return [%]': self.total() * 100,
        'Benchmark Return [%]': benchmark_rets.vbt.returns.total() * 100,
        'Annual Return [%]': self.annualized() * 100,
        'Annual Volatility [%]': self.annualized_volatility(levy_alpha=levy_alpha) * 100,
        'Sharpe Ratio': self.sharpe_ratio(risk_free=risk_free),
        'Calmar Ratio': self.calmar_ratio(),
        'Max. Drawdown [%]': self.max_drawdown() * 100,
        'Omega Ratio': self.omega_ratio(required_return=required_return),
        'Sortino Ratio': self.sortino_ratio(required_return=required_return),
        'Skew': self._obj.skew(axis=0),
        'Kurtosis': self._obj.kurtosis(axis=0),
        'Tail Ratio': self.tail_ratio(),
        'Common Sense Ratio': self.common_sense_ratio(),
        'Value at Risk': self.value_at_risk(),
        'Alpha': self.alpha(benchmark_rets, risk_free=risk_free),
        'Beta': self.beta(benchmark_rets)
    }, index=self.wrapper.columns)

    # Select columns or reduce
    if self.is_series():
        # A Series input produces a single row; reduce it to a Series of stats
        wrap_kwargs = merge_dicts(dict(name_or_index=stats_df.columns), wrap_kwargs)
        return self.wrapper.wrap_reduced(stats_df.iloc[0], **wrap_kwargs)
    return stats_df
def broadcast_to(self, other: tp.Union[tp.ArrayLike, "BaseAccessor"], **kwargs) -> reshape_fns.BCRT:
    """See `vectorbt.base.reshape_fns.broadcast_to`."""
    target = other.obj if isinstance(other, BaseAccessor) else other
    return reshape_fns.broadcast_to(self.obj, target, **kwargs)
def stats(self, benchmark_rets, levy_alpha=2.0, risk_free=0., required_return=0.): """Compute various statistics on these returns. Args: benchmark_rets (array_like): Benchmark return to compare returns against. Will broadcast per element. levy_alpha (float or array_like): Scaling relation (Levy stability exponent). Will broadcast per column. risk_free (float or array_like): Constant risk-free return throughout the period. Will broadcast per column. required_return (float or array_like): Minimum acceptance return of the investor. Will broadcast per column. ## Example ```python-repl >>> import pandas as pd >>> from datetime import datetime >>> import yfinance as yf >>> import vectorbt as vbt >>> btc_price = yf.Ticker("BTC-USD").history()['Close'] >>> spy_price = yf.Ticker("SPY").history()['Close'] >>> price_df = pd.concat([btc_price, spy_price], axis=1, keys=("BTC-USD", "SPY")) >>> returns_df = price_df.pct_change() >>> returns_df["BTC-USD"].vbt.returns.stats(returns_df["SPY"]) Start 2020-11-01 00:00:00 End 2020-12-01 00:00:00 Duration 31 days 00:00:00 Total Return [%] 37.9835 Benchmark Return [%] 10.7935 Annual Return [%] 4329.46 Annual Volatility [%] 71.5084 Sharpe Ratio 5.84964 Calmar Ratio 413.819 Max. 
Drawdown [%] -10.4622 Omega Ratio 2.36607 Sortino Ratio 11.0962 Skew 0.036609 Kurtosis 1.04302 Tail Ratio 1.66878 Common Sense Ratio 73.9178 Value at Risk -0.0412519 Alpha 43.0408 Beta 0.531022 Name: BTC-USD, dtype: object ``` """ # Run stats benchmark_rets = reshape_fns.broadcast_to(benchmark_rets, self._obj) stats_df = pd.DataFrame( { 'Start': self.wrapper.index[0], 'End': self.wrapper.index[-1], 'Duration': self.wrapper.shape[0] * self.wrapper.freq, 'Total Return [%]': self.total() * 100, 'Benchmark Return [%]': benchmark_rets.vbt.returns.total() * 100, 'Annual Return [%]': self.annualized() * 100, 'Annual Volatility [%]': self.annualized_volatility(levy_alpha=levy_alpha) * 100, 'Sharpe Ratio': self.sharpe_ratio(risk_free=risk_free), 'Calmar Ratio': self.calmar_ratio(), 'Max. Drawdown [%]': self.max_drawdown() * 100, 'Omega Ratio': self.omega_ratio(required_return=required_return), 'Sortino Ratio': self.sortino_ratio(required_return=required_return), 'Skew': self._obj.skew(axis=0), 'Kurtosis': self._obj.kurtosis(axis=0), 'Tail Ratio': self.tail_ratio(), 'Common Sense Ratio': self.common_sense_ratio(), 'Value at Risk': self.value_at_risk(), 'Alpha': self.alpha(benchmark_rets, risk_free=risk_free), 'Beta': self.beta(benchmark_rets) }, index=self.wrapper.columns) # Select columns or reduce if self.is_series(): return self.wrapper.wrap_reduced(stats_df.iloc[0], index=stats_df.columns) return stats_df