Example #1
def Analysis(results):
    """
    技术指标分析器
    :param results:
    {
    'returns':[0.1,0.1,0.1],
    'benchmark':[0.1,0.1,0.1]
    'trades':[[2020.01.01 01:00:00,'BUY',6234.10,1]]
    }
    :return:
    """
    res = pnl_res(results["returns"])
    bres = pnl_res(results["benchmark"])
    return_ratio = empyrical.cum_returns_final(res)
    annual_return_ratio = empyrical.annual_return(res)
    sharp_ratio = empyrical.sharpe_ratio(res, 0.035 / 252)
    return_volatility = empyrical.annual_volatility(res)
    max_drawdown = empyrical.max_drawdown(res)
    alpha, beta = empyrical.alpha_beta_aligned(res, bres)
    pls, wr = pls_ws(results["trades"])
    return {
        'pls': pls,
        'wr': wr,
        'return_ratio': return_ratio,
        'annual_return_ratio': annual_return_ratio,
        'beta': beta,
        'alpha': alpha,
        'sharp_ratio': sharp_ratio,
        'return_volatility': return_volatility,
        'max_drawdown': max_drawdown,
    }
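
A minimal call sketch for Analysis, assuming `pnl_res` and `pls_ws` are helper functions defined elsewhere in the same module (converting raw returns into a series empyrical accepts, and computing profit/loss and win-rate from the trade list):

# Hedged usage sketch; pnl_res/pls_ws come from the surrounding module and the
# input layout follows the docstring above.
sample_results = {
    'returns': [0.01, -0.005, 0.02],
    'benchmark': [0.008, -0.002, 0.015],
    'trades': [['2020.01.01 01:00:00', 'BUY', 6234.10, 1]],
}
metrics = Analysis(sample_results)
print(metrics['annual_return_ratio'], metrics['max_drawdown'])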
Example #2
    def calculate_metrics(self):
        self.benchmark_period_returns = \
            cum_returns(self.benchmark_returns).iloc[-1]

        self.algorithm_period_returns = \
            cum_returns(self.algorithm_returns).iloc[-1]

        if not self.algorithm_returns.index.equals(
                self.benchmark_returns.index):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
            algorithm_returns ({algo_count}) in range {start} : {end}"

            message = message.format(bm_count=len(self.benchmark_returns),
                                     algo_count=len(self.algorithm_returns),
                                     start=self._start_session,
                                     end=self._end_session)
            raise Exception(message)

        self.num_trading_days = len(self.benchmark_returns)

        self.mean_algorithm_returns = (
            self.algorithm_returns.cumsum() /
            np.arange(1, self.num_trading_days + 1, dtype=np.float64))

        self.benchmark_volatility = annual_volatility(self.benchmark_returns)
        self.algorithm_volatility = annual_volatility(self.algorithm_returns)

        self.treasury_period_return = choose_treasury(
            self.treasury_curves,
            self._start_session,
            self._end_session,
            self.trading_calendar,
        )
        self.sharpe = sharpe_ratio(self.algorithm_returns)
        # The consumer currently expects a 0.0 value for sharpe in period,
        # this differs from cumulative which was np.nan.
        # When factoring out the sharpe_ratio, the different return types
        # were collapsed into `np.nan`.
        # TODO: Either fix consumer to accept `np.nan` or make the
        # `sharpe_ratio` return type configurable.
        # In the meantime, convert nan values to 0.0
        if pd.isnull(self.sharpe):
            self.sharpe = 0.0
        self.downside_risk = downside_risk(self.algorithm_returns.values)
        self.sortino = sortino_ratio(
            self.algorithm_returns.values,
            _downside_risk=self.downside_risk,
        )
        self.information = information_ratio(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.alpha, self.beta = alpha_beta_aligned(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.excess_return = self.algorithm_period_returns - \
            self.treasury_period_return
        self.max_drawdown = max_drawdown(self.algorithm_returns.values)
        self.max_leverage = self.calculate_max_leverage()
Example #3
    def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
        risk = packet["cumulative_risk_metrics"]

        alpha, beta = ep.alpha_beta_aligned(
            ledger.daily_returns_array[:session_ix + 1],
            self._daily_returns_array[:session_ix + 1],
        )
        if not np.isfinite(alpha):
            alpha = None
        if np.isnan(beta):
            beta = None

        risk["alpha"] = alpha
        risk["beta"] = beta
Example #4
    def getStats(cls, returns, benchmark_returns):
        _alpha, _beta = alpha_beta_aligned(
            returns,
            benchmark_returns,
        )

        _sharpe = sharpe_ratio(
            returns
        )

        _downside_risk = downside_risk(
            returns
        )

        _max_drawdown = max_drawdown(
            returns
        )

        _annual_volatility = annual_volatility(
            returns
        )

        _benchmark_volatility = annual_volatility(
            benchmark_returns
        )

        _annual_return = annual_return(
            returns
        )

        _cum_return = cum_returns(
            returns
        )

        return {
            'cum_return' : _cum_return,
            'annual_return' : _annual_return,
            'annual_volatility' : _annual_volatility,
            'benchmark_volatility' : _benchmark_volatility,
            'max_drawdown' : _max_drawdown,
            'downside_risk' : _downside_risk,
            'sharpe ratio' : _sharpe,
            'alpha' : _alpha,
            'beta' : _beta,
        }
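
For reference, the metric set wrapped by getStats can be reproduced directly with empyrical on synthetic daily returns; this is a minimal standalone sketch, not part of the original class:

# Standalone sketch of the metrics wrapped by getStats, using empyrical directly
# on synthetic data.
import numpy as np
from empyrical import (alpha_beta_aligned, annual_return, annual_volatility,
                       cum_returns, downside_risk, max_drawdown, sharpe_ratio)

rng = np.random.default_rng(0)
returns = rng.normal(0.0005, 0.01, 252)              # one year of synthetic daily returns
benchmark_returns = rng.normal(0.0003, 0.008, 252)

alpha, beta = alpha_beta_aligned(returns, benchmark_returns)
stats = {
    'cum_return': cum_returns(returns),               # full cumulative-return path
    'annual_return': annual_return(returns),
    'annual_volatility': annual_volatility(returns),
    'benchmark_volatility': annual_volatility(benchmark_returns),
    'max_drawdown': max_drawdown(returns),
    'downside_risk': downside_risk(returns),
    'sharpe_ratio': sharpe_ratio(returns),
    'alpha': alpha,
    'beta': beta,
}
print(stats['annual_return'], stats['sharpe_ratio'])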
Example #5
    def end_of_bar(self,
                   packet,
                   ledger,
                   dt,
                   session_ix,
                   data_portal):
        risk = packet['cumulative_risk_metrics']
        alpha, beta = ep.alpha_beta_aligned(
            ledger.daily_returns_array[:session_ix + 1],
            self._daily_returns_array[:session_ix + 1],
        )

        if np.isnan(alpha):
            alpha = None
        if np.isnan(beta):
            beta = None

        risk['alpha'] = alpha
        risk['beta'] = beta
Example #6
    def update(self, dt, algorithm_returns, benchmark_returns, leverage):
        # Keep track of latest dt for use in to_dict and other methods
        # that report current state.
        self.latest_dt = dt
        dt_loc = self.cont_index.get_loc(dt)
        self.latest_dt_loc = dt_loc

        self.algorithm_returns_cont[dt_loc] = algorithm_returns
        self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]

        self.num_trading_days = len(self.algorithm_returns)

        if self.create_first_day_stats:
            if len(self.algorithm_returns) == 1:
                self.algorithm_returns = np.append(0.0, self.algorithm_returns)

        self.algorithm_cumulative_returns[dt_loc] = cum_returns(
            self.algorithm_returns)[-1]

        algo_cumulative_returns_to_date = \
            self.algorithm_cumulative_returns[:dt_loc + 1]

        self.mean_returns_cont[dt_loc] = \
            algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days

        self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

        self.annualized_mean_returns_cont[dt_loc] = \
            self.mean_returns_cont[dt_loc] * 252

        self.annualized_mean_returns = \
            self.annualized_mean_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.mean_returns) == 1:
                self.mean_returns = np.append(0.0, self.mean_returns)
                self.annualized_mean_returns = np.append(
                    0.0, self.annualized_mean_returns)

        self.benchmark_returns_cont[dt_loc] = benchmark_returns
        self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.benchmark_returns) == 1:
                self.benchmark_returns = np.append(0.0, self.benchmark_returns)

        self.benchmark_cumulative_returns[dt_loc] = cum_returns(
            self.benchmark_returns)[-1]

        benchmark_cumulative_returns_to_date = \
            self.benchmark_cumulative_returns[:dt_loc + 1]

        self.mean_benchmark_returns_cont[dt_loc] = \
            benchmark_cumulative_returns_to_date[dt_loc] / \
            self.num_trading_days

        self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc]

        self.annualized_mean_benchmark_returns_cont[dt_loc] = \
            self.mean_benchmark_returns_cont[dt_loc] * 252

        self.annualized_mean_benchmark_returns = \
            self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

        self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
        self.algorithm_cumulative_leverages = \
            self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.algorithm_cumulative_leverages) == 1:
                self.algorithm_cumulative_leverages = np.append(
                    0.0, self.algorithm_cumulative_leverages)

        if not len(self.algorithm_returns) and len(self.benchmark_returns):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"

            message = message.format(bm_count=len(self.benchmark_returns),
                                     algo_count=len(self.algorithm_returns),
                                     start=self.start_session,
                                     end=self.end_session,
                                     dt=dt)
            raise Exception(message)

        self.update_current_max()
        self.benchmark_volatility[dt_loc] = annual_volatility(
            self.benchmark_returns)
        self.algorithm_volatility[dt_loc] = annual_volatility(
            self.algorithm_returns)

        # caching the treasury rates for the minutely case is a
        # big speedup, because it avoids searching the treasury
        # curves on every minute.
        # In both minutely and daily, the daily curve is always used.
        treasury_end = dt.replace(hour=0, minute=0)
        if np.isnan(self.daily_treasury[treasury_end]):
            treasury_period_return = choose_treasury(
                self.treasury_curves,
                self.start_session,
                treasury_end,
                self.trading_calendar,
            )
            self.daily_treasury[treasury_end] = treasury_period_return
        self.treasury_period_return = self.daily_treasury[treasury_end]
        self.excess_returns[dt_loc] = (
            self.algorithm_cumulative_returns[dt_loc] -
            self.treasury_period_return)

        self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
            self.algorithm_returns,
            self.benchmark_returns,
        )
        self.sharpe[dt_loc] = sharpe_ratio(self.algorithm_returns)
        self.downside_risk[dt_loc] = downside_risk(self.algorithm_returns)
        self.sortino[dt_loc] = sortino_ratio(
            self.algorithm_returns, _downside_risk=self.downside_risk[dt_loc])
        self.max_drawdown = max_drawdown(self.algorithm_returns)
        self.max_drawdowns[dt_loc] = self.max_drawdown
        self.max_leverage = self.calculate_max_leverage()
        self.max_leverages[dt_loc] = self.max_leverage
Example #7
    def risk_metric_period(
        cls,
        start_session,
        end_session,
        algorithm_returns,
        benchmark_returns,
        algorithm_leverages,
    ):
        """
        Creates a dictionary representing the state of the risk report.

        Parameters
        ----------
        start_session : pd.Timestamp
            Start of period (inclusive) to produce metrics on
        end_session : pd.Timestamp
            End of period (inclusive) to produce metrics on
        algorithm_returns : pd.Series(pd.Timestamp -> float)
            Series of algorithm returns as of the end of each session
        benchmark_returns : pd.Series(pd.Timestamp -> float)
            Series of benchmark returns as of the end of each session
        algorithm_leverages : pd.Series(pd.Timestamp -> float)
            Series of algorithm leverages as of the end of each session


        Returns
        -------
        risk_metric : dict[str, any]
            Dict of metrics with fields like:
                {
                    'algorithm_period_return': 0.0,
                    'benchmark_period_return': 0.0,
                    'treasury_period_return': 0,
                    'excess_return': 0.0,
                    'alpha': 0.0,
                    'beta': 0.0,
                    'sharpe': 0.0,
                    'sortino': 0.0,
                    'period_label': '1970-01',
                    'trading_days': 0,
                    'algo_volatility': 0.0,
                    'benchmark_volatility': 0.0,
                    'max_drawdown': 0.0,
                    'max_leverage': 0.0,
                }
        """

        algorithm_returns = algorithm_returns[
            (algorithm_returns.index >= start_session)
            & (algorithm_returns.index <= end_session)]

        # Benchmark needs to be masked to the same dates as the algo returns
        benchmark_returns = benchmark_returns[
            (benchmark_returns.index >= start_session)
            & (benchmark_returns.index <= algorithm_returns.index[-1])]

        benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
        algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]

        alpha, beta = ep.alpha_beta_aligned(
            algorithm_returns.values,
            benchmark_returns.values,
        )
        benchmark_volatility = ep.annual_volatility(benchmark_returns)

        sharpe = ep.sharpe_ratio(algorithm_returns)

        # The consumer currently expects a 0.0 value for sharpe in period,
        # this differs from cumulative which was np.nan.
        # When factoring out the sharpe_ratio, the different return types
        # were collapsed into `np.nan`.
        # TODO: Either fix consumer to accept `np.nan` or make the
        # `sharpe_ratio` return type configurable.
        # In the meantime, convert nan values to 0.0
        if pd.isnull(sharpe):
            sharpe = 0.0

        sortino = ep.sortino_ratio(
            algorithm_returns.values,
            _downside_risk=ep.downside_risk(algorithm_returns.values),
        )

        rval = {
            "algorithm_period_return": algorithm_period_returns,
            "benchmark_period_return": benchmark_period_returns,
            "treasury_period_return": 0,
            "excess_return": algorithm_period_returns,
            "alpha": alpha,
            "beta": beta,
            "sharpe": sharpe,
            "sortino": sortino,
            "period_label": end_session.strftime("%Y-%m"),
            "trading_days": len(benchmark_returns),
            "algo_volatility": ep.annual_volatility(algorithm_returns),
            "benchmark_volatility": benchmark_volatility,
            "max_drawdown": ep.max_drawdown(algorithm_returns.values),
            "max_leverage": algorithm_leverages.max(),
        }

        # check if a field in rval is nan or inf, and replace it with None
        # except period_label which is always a str
        return {
            k: (None if k != "period_label" and not np.isfinite(v) else v)
            for k, v in rval.items()
        }
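
A hedged usage sketch for risk_metric_period; `RiskMetrics` is a placeholder name for the (unnamed) enclosing class, assuming the method is decorated as a classmethod as in zipline:

# Hypothetical call; RiskMetrics stands in for whatever class defines
# risk_metric_period.
import numpy as np
import pandas as pd

sessions = pd.date_range('2021-01-04', periods=21, freq='B')
algo = pd.Series(np.random.default_rng(1).normal(0.0005, 0.01, len(sessions)),
                 index=sessions)
bench = pd.Series(np.random.default_rng(2).normal(0.0003, 0.008, len(sessions)),
                  index=sessions)
leverages = pd.Series(1.0, index=sessions)

report = RiskMetrics.risk_metric_period(
    start_session=sessions[0],
    end_session=sessions[-1],
    algorithm_returns=algo,
    benchmark_returns=bench,
    algorithm_leverages=leverages,
)
print(report['period_label'], report['sharpe'], report['max_drawdown'])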
Example #8
    def risk_metric_period(cls,
                           start_session,
                           end_session,
                           algorithm_returns,
                           benchmark_returns,
                           algorithm_leverages):
        """
        Creates a dictionary representing the state of the risk report.

        Parameters
        ----------
        start_session : pd.Timestamp
            Start of period (inclusive) to produce metrics on
        end_session : pd.Timestamp
            End of period (inclusive) to produce metrics on
        algorithm_returns : pd.Series(pd.Timestamp -> float)
            Series of algorithm returns as of the end of each session
        benchmark_returns : pd.Series(pd.Timestamp -> float)
            Series of benchmark returns as of the end of each session
        algorithm_leverages : pd.Series(pd.Timestamp -> float)
            Series of algorithm leverages as of the end of each session


        Returns
        -------
        risk_metric : dict[str, any]
            Dict of metrics with fields like:
                {
                    'algorithm_period_return': 0.0,
                    'benchmark_period_return': 0.0,
                    'treasury_period_return': 0,
                    'excess_return': 0.0,
                    'alpha': 0.0,
                    'beta': 0.0,
                    'sharpe': 0.0,
                    'sortino': 0.0,
                    'period_label': '1970-01',
                    'trading_days': 0,
                    'algo_volatility': 0.0,
                    'benchmark_volatility': 0.0,
                    'max_drawdown': 0.0,
                    'max_leverage': 0.0,
                }
        """

        algorithm_returns = algorithm_returns[
            (algorithm_returns.index >= start_session) &
            (algorithm_returns.index <= end_session)
        ]

        # Benchmark needs to be masked to the same dates as the algo returns
        benchmark_returns = benchmark_returns[
            (benchmark_returns.index >= start_session) &
            (benchmark_returns.index <= algorithm_returns.index[-1])
        ]

        benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
        algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]

        alpha, beta = ep.alpha_beta_aligned(
            algorithm_returns.values,
            benchmark_returns.values,
        )

        sharpe = ep.sharpe_ratio(algorithm_returns)

        # The consumer currently expects a 0.0 value for sharpe in period,
        # this differs from cumulative which was np.nan.
        # When factoring out the sharpe_ratio, the different return types
        # were collapsed into `np.nan`.
        # TODO: Either fix consumer to accept `np.nan` or make the
        # `sharpe_ratio` return type configurable.
        # In the meantime, convert nan values to 0.0
        if pd.isnull(sharpe):
            sharpe = 0.0

        sortino = ep.sortino_ratio(
            algorithm_returns.values,
            _downside_risk=ep.downside_risk(algorithm_returns.values),
        )

        rval = {
            'algorithm_period_return': algorithm_period_returns,
            'benchmark_period_return': benchmark_period_returns,
            'treasury_period_return': 0,
            'excess_return': algorithm_period_returns,
            'alpha': alpha,
            'beta': beta,
            'sharpe': sharpe,
            'sortino': sortino,
            'period_label': end_session.strftime("%Y-%m"),
            'trading_days': len(benchmark_returns),
            'algo_volatility': ep.annual_volatility(algorithm_returns),
            'benchmark_volatility': ep.annual_volatility(benchmark_returns),
            'max_drawdown': ep.max_drawdown(algorithm_returns.values),
            'max_leverage': algorithm_leverages.max(),
        }

        # check if a field in rval is nan or inf, and replace it with None
        # except period_label which is always a str
        return {
            k: (
                None
                if k != 'period_label' and not np.isfinite(v) else
                v
            )
            for k, v in iteritems(rval)
        }
Example #9
    def calculate_metrics(self):
        self.benchmark_period_returns = \
            cum_returns(self.benchmark_returns).iloc[-1]

        self.algorithm_period_returns = \
            cum_returns(self.algorithm_returns).iloc[-1]

        if not self.algorithm_returns.index.equals(
            self.benchmark_returns.index
        ):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
            algorithm_returns ({algo_count}) in range {start} : {end}"
            message = message.format(
                bm_count=len(self.benchmark_returns),
                algo_count=len(self.algorithm_returns),
                start=self._start_session,
                end=self._end_session
            )
            raise Exception(message)

        self.num_trading_days = len(self.benchmark_returns)

        self.mean_algorithm_returns = (
            self.algorithm_returns.cumsum() /
            np.arange(1, self.num_trading_days + 1, dtype=np.float64)
        )

        self.benchmark_volatility = annual_volatility(self.benchmark_returns)
        self.algorithm_volatility = annual_volatility(self.algorithm_returns)

        self.treasury_period_return = choose_treasury(
            self.treasury_curves,
            self._start_session,
            self._end_session,
            self.trading_calendar,
        )
        self.sharpe = sharpe_ratio(
            self.algorithm_returns,
        )
        # The consumer currently expects a 0.0 value for sharpe in period,
        # this differs from cumulative which was np.nan.
        # When factoring out the sharpe_ratio, the different return types
        # were collapsed into `np.nan`.
        # TODO: Either fix consumer to accept `np.nan` or make the
        # `sharpe_ratio` return type configurable.
        # In the meantime, convert nan values to 0.0
        if pd.isnull(self.sharpe):
            self.sharpe = 0.0
        self.downside_risk = downside_risk(
            self.algorithm_returns.values
        )
        self.sortino = sortino_ratio(
            self.algorithm_returns.values,
            _downside_risk=self.downside_risk,
        )
        self.information = information_ratio(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.alpha, self.beta = alpha_beta_aligned(
            self.algorithm_returns.values,
            self.benchmark_returns.values,
        )
        self.excess_return = self.algorithm_period_returns - \
            self.treasury_period_return
        self.max_drawdown = max_drawdown(self.algorithm_returns.values)
        self.max_leverage = self.calculate_max_leverage()
Example #10
    def update(self, dt, algorithm_returns, benchmark_returns, leverage):
        # Keep track of latest dt for use in to_dict and other methods
        # that report current state.
        self.latest_dt = dt
        dt_loc = self.cont_index.get_loc(dt)
        self.latest_dt_loc = dt_loc

        self.algorithm_returns_cont[dt_loc] = algorithm_returns
        self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]

        self.num_trading_days = len(self.algorithm_returns)

        if self.create_first_day_stats:
            if len(self.algorithm_returns) == 1:
                self.algorithm_returns = np.append(0.0, self.algorithm_returns)

        self.algorithm_cumulative_returns[dt_loc] = cum_returns(
            self.algorithm_returns
        )[-1]

        algo_cumulative_returns_to_date = \
            self.algorithm_cumulative_returns[:dt_loc + 1]

        self.mean_returns_cont[dt_loc] = \
            algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days

        self.mean_returns = self.mean_returns_cont[:dt_loc + 1]

        self.annualized_mean_returns_cont[dt_loc] = \
            self.mean_returns_cont[dt_loc] * 252

        self.annualized_mean_returns = \
            self.annualized_mean_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.mean_returns) == 1:
                self.mean_returns = np.append(0.0, self.mean_returns)
                self.annualized_mean_returns = np.append(
                    0.0, self.annualized_mean_returns)

        self.benchmark_returns_cont[dt_loc] = benchmark_returns
        self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.benchmark_returns) == 1:
                self.benchmark_returns = np.append(0.0, self.benchmark_returns)

        self.benchmark_cumulative_returns[dt_loc] = cum_returns(
            self.benchmark_returns
        )[-1]

        benchmark_cumulative_returns_to_date = \
            self.benchmark_cumulative_returns[:dt_loc + 1]

        self.mean_benchmark_returns_cont[dt_loc] = \
            benchmark_cumulative_returns_to_date[dt_loc] / \
            self.num_trading_days

        self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc]

        self.annualized_mean_benchmark_returns_cont[dt_loc] = \
            self.mean_benchmark_returns_cont[dt_loc] * 252

        self.annualized_mean_benchmark_returns = \
            self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]

        self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
        self.algorithm_cumulative_leverages = \
            self.algorithm_cumulative_leverages_cont[:dt_loc + 1]

        if self.create_first_day_stats:
            if len(self.algorithm_cumulative_leverages) == 1:
                self.algorithm_cumulative_leverages = np.append(
                    0.0,
                    self.algorithm_cumulative_leverages)

        if not len(self.algorithm_returns) and len(self.benchmark_returns):
            message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
            message = message.format(
                bm_count=len(self.benchmark_returns),
                algo_count=len(self.algorithm_returns),
                start=self.start_session,
                end=self.end_session,
                dt=dt
            )
            raise Exception(message)

        self.update_current_max()
        self.benchmark_volatility[dt_loc] = annual_volatility(
            self.benchmark_returns
        )
        self.algorithm_volatility[dt_loc] = annual_volatility(
            self.algorithm_returns
        )

        # caching the treasury rates for the minutely case is a
        # big speedup, because it avoids searching the treasury
        # curves on every minute.
        # In both minutely and daily, the daily curve is always used.
        treasury_end = dt.replace(hour=0, minute=0)
        if np.isnan(self.daily_treasury[treasury_end]):
            treasury_period_return = choose_treasury(
                self.treasury_curves,
                self.start_session,
                treasury_end,
                self.trading_calendar,
            )
            self.daily_treasury[treasury_end] = treasury_period_return
        self.treasury_period_return = self.daily_treasury[treasury_end]
        self.excess_returns[dt_loc] = (
            self.algorithm_cumulative_returns[dt_loc] -
            self.treasury_period_return)

        self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
            self.algorithm_returns,
            self.benchmark_returns,
        )
        self.sharpe[dt_loc] = sharpe_ratio(
            self.algorithm_returns,
        )
        self.downside_risk[dt_loc] = downside_risk(
            self.algorithm_returns
        )
        self.sortino[dt_loc] = sortino_ratio(
            self.algorithm_returns,
            _downside_risk=self.downside_risk[dt_loc]
        )
        self.information[dt_loc] = information_ratio(
            self.algorithm_returns,
            self.benchmark_returns,
        )
        self.max_drawdown = max_drawdown(
            self.algorithm_returns
        )
        self.max_drawdowns[dt_loc] = self.max_drawdown
        self.max_leverage = self.calculate_max_leverage()
        self.max_leverages[dt_loc] = self.max_leverage
Example #11
    def generate_analysis_data(self, context):
        if self.daily_data_df.shape[0] > 0:
            # Calculate returns
            daily_returns = self.daily_data_df['net'].pct_change()

            # daily return for the first day will always be 0
            daily_returns[0] = (
                self.daily_data_df['net'][0] /
                self.analysis_data.info_data['initial_cash']) - 1

            ytd_returns = daily_returns[
                daily_returns.index >= datetime.datetime(
                    daily_returns.index[-1].year, 1, 1).date()]

            one_year_daily_returns = daily_returns[daily_returns.index >= (
                daily_returns.index[-1] - BDay(252)).date()]

            benchmark_returns = context.trading_environment.benchmark_returns.loc[
                self.daily_data_df.index[0]:self.daily_data_df.index[-1]]
            benchmark_returns.index = benchmark_returns.index.date

            daily_returns = daily_returns.drop(
                daily_returns.index.difference(benchmark_returns.index))
            benchmark_returns = benchmark_returns.drop(
                benchmark_returns.index.difference(daily_returns.index))
            benchmark_returns = benchmark_returns.loc[~benchmark_returns.index.
                                                      duplicated(keep='first')]

            ytd_benchmark_returns = benchmark_returns[
                benchmark_returns.index >= datetime.datetime(
                    benchmark_returns.index[-1].year, 1, 1).date()]

            one_year_benchmark_returns = benchmark_returns[
                benchmark_returns.index >= (benchmark_returns.index[-1] -
                                            BDay(252)).date()]

            portfolio_dd = self.rolling_drawdown(daily_returns.values)
            benchmark_dd = self.rolling_drawdown(benchmark_returns.values)

            report_dict = {}
            benchmark_report_dict = {}

            report_dict['total_return_pct'] = (daily_returns + 1).prod() - 1
            report_dict['total_return'] = self.daily_data_df.iloc[
                -1].net - self.daily_data_df.iloc[0].net
            report_dict['ytd'] = (ytd_returns + 1).prod() - 1
            report_dict['one_year'] = (one_year_daily_returns + 1).prod() - 1
            report_dict['max_drawdown'] = portfolio_dd.min()

            report_dict['sharpe_ratio'] = empyrical.sharpe_ratio(daily_returns)
            report_dict['alpha'], report_dict[
                'beta'] = empyrical.alpha_beta_aligned(daily_returns,
                                                       benchmark_returns)
            cagr = empyrical.cagr(daily_returns)
            print(cagr)
            report_dict['cagr'] = cagr
            report_dict['std'] = daily_returns.std() * 100

            benchmark_report_dict['total_return_pct'] = (benchmark_returns +
                                                         1).prod() - 1
            benchmark_report_dict['total_return'] = self.daily_data_df.iloc[-1].benchmark_net \
                                                    - self.daily_data_df.iloc[0].benchmark_net
            benchmark_report_dict['ytd'] = (ytd_benchmark_returns +
                                            1).prod() - 1
            benchmark_report_dict['one_year'] = (one_year_benchmark_returns +
                                                 1).prod() - 1
            benchmark_report_dict['max_drawdown'] = benchmark_dd.min()
            benchmark_report_dict['sharpe_ratio'] = empyrical.sharpe_ratio(
                benchmark_returns)
            benchmark_report_dict['alpha'], benchmark_report_dict[
                'beta'] = 0, 1
            benchmark_report_dict['cagr'] = empyrical.cagr(benchmark_returns)
            benchmark_report_dict['std'] = benchmark_returns.std() * 100

            self.daily_cagr[daily_returns.index[-1]] = report_dict['cagr']
            self.daily_benchmark_cagr[
                benchmark_returns.index[-1]] = benchmark_report_dict['cagr']

            plot_data_df = pd.concat([daily_returns, benchmark_returns],
                                     axis=1,
                                     keys=['returns', 'benchmark_returns'])

            plot_data_df.reset_index(inplace=True)

            plot_data_df['drawdown'] = portfolio_dd
            plot_data_df['benchmark_drawdown'] = benchmark_dd

            plot_data_df.set_index('date', inplace=True)
            plot_data_df['cagr'] = self.daily_cagr
            plot_data_df['benchmark_cagr'] = self.daily_benchmark_cagr
            plot_data_df['positions_count'] = self.daily_positions_df.groupby(
                'date').size()
            plot_data_df['positions_count'] = plot_data_df[
                'positions_count'].fillna(0)

            self.analysis_data.chart_data = plot_data_df
            self.analysis_data.strategy_report = report_dict
            self.analysis_data.benchmark_report = benchmark_report_dict
            if len(
                    self.daily_positions_df.index.get_level_values(
                        'date').value_counts()) < 30:
                self.analysis_data.holdings_data = self.daily_positions_df.reset_index(
                )
            else:
                self.analysis_data.holdings_data = self.daily_positions_df.loc[
                    self.daily_positions_df.index.get_level_values('date') >=
                    self.daily_positions_df.index.get_level_values('date').
                    value_counts().sort_index().index[-30]].reset_index()
            if len(self.daily_data_df.index) < 30:
                self.analysis_data.monthly_transactions_data = self.transactions_data
            else:
                self.analysis_data.monthly_transactions_data = self.transactions_data[
                    self.transactions_data.date >=
                    self.daily_data_df.index[-30]]
            self.analysis_data.holdings_data_historical = self.daily_positions_df.reset_index(
            )
            self.analysis_data.transactions_data = self.transactions_data
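
Example #11 relies on a rolling_drawdown helper that is not shown; a common implementation (an assumption about what it computes, consistent with how max_drawdown is reported above) is the running peak-to-trough decline of the cumulative-return curve:

# Assumed sketch of the rolling_drawdown helper: running drawdown of the
# cumulative-return curve built from simple daily returns.
import numpy as np

def rolling_drawdown(returns):
    """Drawdown at each step for an array of simple returns (0 at new highs)."""
    wealth = np.cumprod(1.0 + np.asarray(returns, dtype=float))  # growth of 1.0
    running_peak = np.maximum.accumulate(wealth)                 # highest value so far
    return wealth / running_peak - 1.0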