def recovered_rate(self, group_by=None, wrap_kwargs=None):
    """Rate of recovered drawdowns.

    Computed as the number of recovered drawdowns divided by the total count."""
    wrap_kwargs = merge_dicts(dict(name_or_index='recovered_rate'), wrap_kwargs)
    n_recovered = to_1d(self.recovered.count(group_by=group_by), raw=True)
    n_total = to_1d(self.count(group_by=group_by), raw=True)
    rate = n_recovered / n_total
    return self.wrapper.wrap_reduced(rate, group_by=group_by, **wrap_kwargs)
def current_duration(self, group_by=None, **kwargs):
    """Current duration from peak.

    Does not support grouping."""
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        raise ValueError("Grouping is not supported by this method")
    # Wrap the result with time units by default
    default_wrap_kwargs = dict(time_units=True, name_or_index='current_duration')
    merged_kwargs = merge_dicts(dict(wrap_kwargs=default_wrap_kwargs), kwargs)
    # Take the last (most recent) active drawdown's duration per column
    return self.active.duration.nst(-1, group_by=group_by, **merged_kwargs)
def avg_distance(self, to=None, **kwargs) -> tp.MaybeSeries:
    """Calculate the average distance between True values in `self` and optionally `to`.

    See `SignalsAccessor.map_reduce_between`."""
    merged_kwargs = merge_dicts(dict(wrap_kwargs=dict(name_or_index='avg_distance')), kwargs)
    return self.map_reduce_between(
        other=to,
        map_func_nb=nb.distance_map_nb,
        reduce_func_nb=nb.mean_reduce_nb,
        **merged_kwargs
    )
def plots_defaults(self) -> tp.Kwargs:
    """Defaults for `Drawdowns.plots`.

    Merges `vectorbt.generic.ranges.Ranges.plots_defaults` and
    `drawdowns.plots` from `vectorbt._settings.settings`."""
    from vectorbt._settings import settings
    base_defaults = Ranges.plots_defaults.__get__(self)
    return merge_dicts(base_defaults, settings['drawdowns']['plots'])
def plots_defaults(self) -> tp.Kwargs:
    """Defaults for `Records.plots`.

    Merges `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots_defaults` and
    `records.plots` from `vectorbt._settings.settings`."""
    from vectorbt._settings import settings
    base_defaults = PlotsBuilderMixin.plots_defaults.__get__(self)
    return merge_dicts(base_defaults, settings['records']['plots'])
def rolling_calmar_ratio(self,
                         window: int,
                         minp: tp.Optional[int] = None,
                         wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Rolling version of `ReturnsAccessor.calmar_ratio`."""
    out = nb.rolling_calmar_ratio_nb(self.to_2d_array(), window, minp, self.ann_factor)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def cumulative(self, start_value: float = 0., wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Cumulative returns.

    Args:
        start_value (float): The starting returns."""
    out = nb.cum_returns_nb(self.to_2d_array(), start_value)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def buy_rate(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Rate of buy operations.

    Computed as the number of buy operations divided by the total count."""
    wrap_kwargs = merge_dicts(dict(name_or_index='buy_rate'), wrap_kwargs)
    n_buys = to_1d(self.buy.count(group_by=group_by), raw=True)
    n_total = to_1d(self.count(group_by=group_by), raw=True)
    rate = n_buys / n_total
    return self.wrapper.wrap_reduced(rate, group_by=group_by, **wrap_kwargs)
def open_rate(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Rate of open trades.

    Computed as the number of open trades divided by the total count."""
    wrap_kwargs = merge_dicts(dict(name_or_index='open_rate'), wrap_kwargs)
    n_open = to_1d(self.open.count(group_by=group_by), raw=True)
    n_total = to_1d(self.count(group_by=group_by), raw=True)
    rate = n_open / n_total
    return self.wrapper.wrap_reduced(rate, group_by=group_by, **wrap_kwargs)
def cond_value_at_risk(self, cutoff=0.05, wrap_kwargs=None):
    """Conditional value at risk (CVaR) of a returns stream.

    Args:
        cutoff (float or array_like): Decimal representing the percentage cutoff for the
            bottom percentile of returns. Will broadcast per column."""
    # Broadcast to one cutoff per column, for consistency with `value_at_risk`,
    # which performs the same broadcast before calling its nb kernel.
    cutoff = np.broadcast_to(cutoff, (len(self.wrapper.columns),))
    wrap_kwargs = merge_dicts(dict(name_or_index='cond_value_at_risk'), wrap_kwargs)
    return self.wrapper.wrap_reduced(nb.cond_value_at_risk_nb(
        self.to_2d_array(), cutoff
    ), **wrap_kwargs)
def omega_ratio(self, risk_free=0., required_return=0., wrap_kwargs=None):
    """Omega ratio of a strategy.

    Args:
        risk_free (float or array_like): Constant risk-free return throughout the period.
            Will broadcast per column.
        required_return (float or array_like): Minimum acceptance return of the investor.
            Will broadcast per column."""
    # Broadcast scalars to one value per column, consistent with
    # `sharpe_ratio` and `downside_risk` in this accessor.
    risk_free = np.broadcast_to(risk_free, (len(self.wrapper.columns),))
    required_return = np.broadcast_to(required_return, (len(self.wrapper.columns),))
    wrap_kwargs = merge_dicts(dict(name_or_index='omega_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(nb.omega_ratio_nb(
        self.to_2d_array(), self.ann_factor, risk_free, required_return
    ), **wrap_kwargs)
def sortino_ratio(self, required_return=0., wrap_kwargs=None):
    """Sortino ratio of a strategy.

    Args:
        required_return (float or array_like): Minimum acceptance return of the investor.
            Will broadcast per column."""
    # The docstring promises per-column broadcasting; perform it here as the
    # sibling methods (`sharpe_ratio`, `downside_risk`) do.
    required_return = np.broadcast_to(required_return, (len(self.wrapper.columns),))
    wrap_kwargs = merge_dicts(dict(name_or_index='sortino_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(nb.sortino_ratio_nb(
        self.to_2d_array(), self.ann_factor, required_return
    ), **wrap_kwargs)
def max_duration(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None,
                 **kwargs) -> tp.MaybeSeries:
    """Maximum range duration (as timedelta)."""
    merged_wrap_kwargs = merge_dicts(
        dict(to_timedelta=True, name_or_index='max_duration'),
        wrap_kwargs
    )
    return self.duration.max(group_by=group_by, wrap_kwargs=merged_wrap_kwargs, **kwargs)
def plots_defaults(self) -> tp.Kwargs:
    """Defaults for `Ranges.plots`.

    Merges `vectorbt.records.base.Records.plots_defaults` and
    `ranges.plots` from `vectorbt._settings.settings`."""
    from vectorbt._settings import settings
    base_defaults = Records.plots_defaults.__get__(self)
    return merge_dicts(base_defaults, settings['ranges']['plots'])
def tile(self, n, keys=None, axis=1, wrap_kwargs=None):
    """See `vectorbt.base.reshape_fns.tile`.

    Set `axis` to 1 for columns and 0 for index.
    Use `keys` as the outermost level."""
    tiled = reshape_fns.tile(self._obj, n, axis=axis)
    if keys is None:
        # Nothing to relabel
        return tiled
    if axis == 1:
        new_columns = index_fns.combine_indexes(keys, self.wrapper.columns)
        override = dict(columns=new_columns)
    else:
        new_index = index_fns.combine_indexes(keys, self.wrapper.index)
        override = dict(index=new_index)
    return tiled.vbt.wrapper.wrap(tiled.values, **merge_dicts(override, wrap_kwargs))
def update_symbol(self, symbol, **kwargs):
    """Update the symbol.

    `**kwargs` will override keyword arguments passed to `CCXTData.download_symbol`."""
    base_kwargs = self.select_symbol_kwargs(symbol, self.download_kwargs)
    # Resume from the last fetched timestamp and avoid a second progress bar
    base_kwargs['start'] = self.data[symbol].index[-1]
    base_kwargs['show_progress'] = False
    return self.download_symbol(symbol, **merge_dicts(base_kwargs, kwargs))
def sqn(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """System Quality Number (SQN)."""
    n = to_1d(self.count(group_by=group_by), raw=True)
    mean_pnl = to_1d(self.pnl.mean(group_by=group_by), raw=True)
    std_pnl = to_1d(self.pnl.std(group_by=group_by), raw=True)
    # SQN = sqrt(trade count) * mean(PnL) / std(PnL)
    out = np.sqrt(n) * mean_pnl / std_pnl
    wrap_kwargs = merge_dicts(dict(name_or_index='sqn'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, group_by=group_by, **wrap_kwargs)
def reduce(self,
           reduce_func_nb: tp.ReduceFunc, *args,
           idx_arr: tp.Optional[tp.Array1d] = None,
           returns_array: bool = False,
           returns_idx: bool = False,
           to_index: bool = True,
           fill_value: tp.Scalar = np.nan,
           group_by: tp.GroupByLike = None,
           wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeriesFrame:
    """Reduce mapped array by column/group.

    If `returns_array` is False and `returns_idx` is False, see `vectorbt.records.nb.reduce_mapped_nb`.
    If `returns_array` is False and `returns_idx` is True, see `vectorbt.records.nb.reduce_mapped_to_idx_nb`.
    If `returns_array` is True and `returns_idx` is False, see `vectorbt.records.nb.reduce_mapped_to_array_nb`.
    If `returns_array` is True and `returns_idx` is True, see `vectorbt.records.nb.reduce_mapped_to_idx_array_nb`.

    If `returns_idx` is True, must pass `idx_arr`. Set `to_index` to False to return raw positions
    instead of labels. Use `fill_value` to set the default value. Set `group_by` to False to disable grouping.
    """
    # Perform checks
    checks.assert_numba_func(reduce_func_nb)
    if idx_arr is None:
        # Fall back to the instance-level index array; only required when
        # the caller asked for index positions back
        if self.idx_arr is None:
            if returns_idx:
                raise ValueError("Must pass idx_arr")
        idx_arr = self.idx_arr

    # Perform main computation
    # Dispatch to one of four nb kernels depending on whether the reducer
    # returns an array per column and whether it returns index positions
    col_map = self.col_mapper.get_col_map(group_by=group_by)
    if not returns_array:
        if not returns_idx:
            out = nb.reduce_mapped_nb(
                self.values, col_map, fill_value, reduce_func_nb, *args)
        else:
            out = nb.reduce_mapped_to_idx_nb(
                self.values, col_map, idx_arr, fill_value, reduce_func_nb, *args)
    else:
        if not returns_idx:
            out = nb.reduce_mapped_to_array_nb(
                self.values, col_map, fill_value, reduce_func_nb, *args)
        else:
            out = nb.reduce_mapped_to_idx_array_nb(
                self.values, col_map, idx_arr, fill_value, reduce_func_nb, *args)

    # Perform post-processing
    # When returning indices, missing entries become -1 and the dtype is
    # forced to integer; `to_index` maps positions to index labels
    wrap_kwargs = merge_dicts(dict(
        name_or_index='reduce' if not returns_array else None,
        to_index=returns_idx and to_index,
        fillna=-1 if returns_idx else None,
        dtype=np.int_ if returns_idx else None
    ), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, group_by=group_by, **wrap_kwargs)
def sharpe_ratio(self, risk_free=0., wrap_kwargs=None):
    """Sharpe ratio of a strategy.

    Args:
        risk_free (float or array_like): Constant risk-free return throughout the period.
            Will broadcast per column."""
    n_cols = len(self.wrapper.columns)
    risk_free = np.broadcast_to(risk_free, (n_cols,))
    result = nb.sharpe_ratio_nb(self.to_2d_array(), self.ann_factor, risk_free)
    wrap_kwargs = merge_dicts(dict(name_or_index='sharpe_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(result, **wrap_kwargs)
def value_at_risk(self, cutoff=0.05, wrap_kwargs=None):
    """Value at risk (VaR) of a returns stream.

    Args:
        cutoff (float or array_like): Decimal representing the percentage cutoff for the
            bottom percentile of returns. Will broadcast per column."""
    n_cols = len(self.wrapper.columns)
    cutoff = np.broadcast_to(cutoff, (n_cols,))
    result = nb.value_at_risk_nb(self.to_2d_array(), cutoff)
    wrap_kwargs = merge_dicts(dict(name_or_index='value_at_risk'), wrap_kwargs)
    return self.wrapper.wrap_reduced(result, **wrap_kwargs)
def active_rate(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Rate of active drawdowns."""
    # NOTE(review): original docstring said "recovered" — copy-paste error;
    # the code counts active drawdowns
    active_count = to_1d(self.active.count(group_by=group_by), raw=True)
    total_count = to_1d(self.count(group_by=group_by), raw=True)
    wrap_kwargs = merge_dicts(dict(name_or_index='active_rate'), wrap_kwargs)
    return self.wrapper.wrap_reduced(active_count / total_count, group_by=group_by, **wrap_kwargs)
def current_drawdown(self, group_by=None, wrap_kwargs=None):
    """Current drawdown from peak.

    Does not support grouping."""
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        raise ValueError("Grouping is not supported by this method")
    # Relative change between the latest active value and its start (peak) value
    end_val = self.active.end_value.nst(-1, group_by=group_by)
    start_val = self.active.start_value.nst(-1, group_by=group_by)
    drawdown = (end_val - start_val) / start_val
    wrap_kwargs = merge_dicts(dict(name_or_index='current_drawdown'), wrap_kwargs)
    return self.wrapper.wrap_reduced(drawdown, group_by=group_by, **wrap_kwargs)
def coverage(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Coverage, that is, total duration divided by the whole period."""
    duration_sum = to_1d(self.duration.sum(group_by=group_by), raw=True)
    # Number of rows available to each column/group
    group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
    n_steps = group_lens * self.wrapper.shape[0]
    wrap_kwargs = merge_dicts(dict(name_or_index='coverage'), wrap_kwargs)
    return self.wrapper.wrap_reduced(duration_sum / n_steps, group_by=group_by, **wrap_kwargs)
def call(self, mapping: tp.Optional[tp.Mapping] = None) -> tp.Any:
    """Call `RepFunc.func` using `mapping`.

    Merges `mapping` and `RepFunc.mapping`."""
    merged = merge_dicts(self.mapping, mapping)
    arg_names = get_func_arg_names(self.func)
    # Pass through only the entries the function actually accepts
    func_kwargs = {k: v for k, v in merged.items() if k in arg_names}
    return self.func(**func_kwargs)
def rolling_cond_value_at_risk(self,
                               window: int,
                               minp: tp.Optional[int] = None,
                               cutoff: float = 0.05,
                               wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Rolling version of `ReturnsAccessor.cond_value_at_risk`."""
    out = nb.rolling_cond_value_at_risk_nb(self.to_2d_array(), window, minp, cutoff)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def downside_risk(self, required_return=0., wrap_kwargs=None):
    """Downside deviation below a threshold.

    Args:
        required_return (float or array_like): Minimum acceptance return of the investor.
            Will broadcast per column."""
    n_cols = len(self.wrapper.columns)
    required_return = np.broadcast_to(required_return, (n_cols,))
    result = nb.downside_risk_nb(self.to_2d_array(), self.ann_factor, required_return)
    wrap_kwargs = merge_dicts(dict(name_or_index='downside_risk'), wrap_kwargs)
    return self.wrapper.wrap_reduced(result, **wrap_kwargs)
def rolling_downside_risk(self,
                          window: int,
                          minp: tp.Optional[int] = None,
                          required_return: float = 0.,
                          wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Rolling version of `ReturnsAccessor.downside_risk`."""
    out = nb.rolling_downside_risk_nb(
        self.to_2d_array(), window, minp, self.ann_factor, required_return)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def current_return(self, group_by=None, **kwargs):
    """Current return from valley.

    Does not support grouping."""
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        raise ValueError("Grouping is not supported by this method")
    merged_kwargs = merge_dicts(
        dict(wrap_kwargs=dict(name_or_index='current_return')),
        kwargs
    )
    # Map each active drawdown to its recovery return, then take the latest one
    recovery_return = self.active.map(nb.dd_recovery_return_map_nb, self.ts.vbt.to_2d_array())
    return recovery_return.nst(-1, group_by=group_by, **merged_kwargs)
def annualized_volatility(self, levy_alpha=2.0, wrap_kwargs=None):
    """Annualized volatility of a strategy.

    Args:
        levy_alpha (float or array_like): Scaling relation (Levy stability exponent).
            Will broadcast per column."""
    n_cols = len(self.wrapper.columns)
    levy_alpha = np.broadcast_to(levy_alpha, (n_cols,))
    result = nb.annualized_volatility_nb(self.to_2d_array(), self.ann_factor, levy_alpha)
    wrap_kwargs = merge_dicts(dict(name_or_index='annualized_volatility'), wrap_kwargs)
    return self.wrapper.wrap_reduced(result, **wrap_kwargs)
def rank(self, reset_by=None, after_false=False, allow_gaps=False, broadcast_kwargs=None, wrap_kwargs=None):
    """See `vectorbt.signals.nb.rank_nb`.

    ## Example

    Rank each True value in each partition in `sig`:

    ```python-repl
    >>> sig.vbt.signals.rank()
                a  b  c
    2020-01-01  1  1  1
    2020-01-02  0  0  2
    2020-01-03  0  1  3
    2020-01-04  0  0  0
    2020-01-05  0  1  0

    >>> sig.vbt.signals.rank(after_false=True)
                a  b  c
    2020-01-01  0  0  0
    2020-01-02  0  0  0
    2020-01-03  0  1  0
    2020-01-04  0  0  0
    2020-01-05  0  1  0

    >>> sig.vbt.signals.rank(allow_gaps=True)
                a  b  c
    2020-01-01  1  1  1
    2020-01-02  0  0  2
    2020-01-03  0  2  3
    2020-01-04  0  0  0
    2020-01-05  0  3  0

    >>> sig.vbt.signals.rank(reset_by=~sig, allow_gaps=True)
                a  b  c
    2020-01-01  1  1  1
    2020-01-02  0  0  2
    2020-01-03  0  1  3
    2020-01-04  0  0  0
    2020-01-05  0  1  0
    ```
    """
    broadcast_kwargs = {} if broadcast_kwargs is None else broadcast_kwargs
    obj = self._obj
    if reset_by is not None:
        # Bring the reset mask to the same shape as the object first
        obj, reset_by = reshape_fns.broadcast(obj, reset_by, **broadcast_kwargs)
        reset_by = reset_by.vbt.to_2d_array()
    ranked = nb.rank_nb(
        obj.vbt.to_2d_array(),
        reset_by=reset_by,
        after_false=after_false,
        allow_gaps=allow_gaps)
    return obj.vbt.wrapper.wrap(ranked, **merge_dicts({}, wrap_kwargs))