Esempio n. 1
0
 def test_alpha_beta_correlation(self, corr_less, corr_more):
     """Higher correlation with the benchmark raises beta and lowers alpha."""
     mu_ret, mu_bench = 0.01, .001
     sigma_ret, sigma_bench = 0.01, .001
     index = pd.date_range('2000-1-30', periods=1000, freq='D')

     def draw_pair(corr):
         # Draw jointly-normal (returns, benchmark) samples whose
         # correlation coefficient is ``corr``.
         off_diag = sigma_ret * sigma_bench * corr
         cov = [[sigma_ret**2, off_diag],
                [off_diag, sigma_bench**2]]
         r, b = np.random.multivariate_normal([mu_ret, mu_bench], cov,
                                              1000).T
         return pd.Series(r, index=index), pd.Series(b, index=index)

     # Generate less correlated returns, then more highly correlated ones.
     returns_less, benchmark_less = draw_pair(corr_less)
     returns_more, benchmark_more = draw_pair(corr_more)

     # Calculate alpha/beta values for both correlation levels.
     alpha_less, beta_less = empyrical.alpha_beta(returns_less,
                                                  benchmark_less)
     alpha_more, beta_more = empyrical.alpha_beta(returns_more,
                                                  benchmark_more)

     # Alpha captures return not explained by the benchmark; weaker
     # correlation leaves more unexplained return, hence higher alpha.
     assert alpha_less > alpha_more
     # Beta measures co-movement with the benchmark and therefore grows
     # with correlation.
     assert beta_less < beta_more
Esempio n. 2
0
 def test_alpha_beta(self, returns, benchmark, expected):
     """Alpha and beta of ``returns`` vs ``benchmark`` match ``expected``.

     Fix: the original called ``empyrical.alpha_beta`` twice — once per
     assertion — recomputing the same regression.  Compute it once and
     unpack.
     """
     alpha, beta = empyrical.alpha_beta(returns, benchmark)
     assert_almost_equal(alpha, expected[0], DECIMAL_PLACES)
     assert_almost_equal(beta, expected[1], DECIMAL_PLACES)
Esempio n. 3
0
 def test_alpha_beta_translation(self, mean_returns, translation):
     """Shifting returns by a constant shifts alpha proportionally and
     leaves beta unchanged.

     Alpha reported by ``empyrical.alpha_beta`` is annualized, so a daily
     shift of ``translation`` changes annual alpha by ``252 * translation``
     — hence the ``/ 252`` in the assertions below.
     """
     # Generate correlated returns and benchmark.
     std_returns = 0.01
     correlation = 0.8
     std_bench = .001
     means = [mean_returns, .001]
     # Covariance matrix encoding the desired return/benchmark correlation.
     covs = [[std_returns**2, std_returns * std_bench * correlation],
             [std_returns * std_bench * correlation, std_bench**2]]
     (ret, bench) = np.random.multivariate_normal(means, covs, 1000).T
     returns = pd.Series(ret,
                         index=pd.date_range('2000-1-30',
                                             periods=1000,
                                             freq='D'))
     benchmark = pd.Series(bench,
                           index=pd.date_range('2000-1-30',
                                               periods=1000,
                                               freq='D'))
     # Translate returns and generate alphas and betas.
     returns_depressed = returns - translation
     returns_raised = returns + translation
     (alpha_depressed,
      beta_depressed) = empyrical.alpha_beta(returns_depressed, benchmark)
     (alpha_standard,
      beta_standard) = empyrical.alpha_beta(returns, benchmark)
     (alpha_raised,
      beta_raised) = empyrical.alpha_beta(returns_raised, benchmark)
     # Alpha should change proportionally to how much returns were
     # translated.
     assert_almost_equal((alpha_standard - alpha_depressed) / 252,
                         translation, DECIMAL_PLACES)
     assert_almost_equal((alpha_raised - alpha_standard) / 252, translation,
                         DECIMAL_PLACES)
     # Beta remains constant: a constant offset does not change covariance
     # with the benchmark.
     assert_almost_equal(beta_standard, beta_depressed, DECIMAL_PLACES)
     assert_almost_equal(beta_standard, beta_raised, DECIMAL_PLACES)
Esempio n. 4
0
 def empyrical_alpha_beta(code, startdate, endate):
     """Return ``(alpha, beta)`` of stock ``code`` versus the "sh" index
     over ``[startdate, endate]``, with no annualization applied."""
     benchmark_returns = get_return("sh", startdate, endate)
     security_returns = get_return(code, startdate, endate)
     alpha, beta = ey.alpha_beta(returns=security_returns,
                                 factor_returns=benchmark_returns,
                                 annualization=1)
     return (alpha, beta)
Esempio n. 5
0
 def test_alpha_beta_equality(self, returns, benchmark):
     """``alpha_beta`` must agree with ``alpha`` and ``beta`` separately."""
     combined = empyrical.alpha_beta(returns, benchmark)
     assert_almost_equal(combined[0],
                         empyrical.alpha(returns, benchmark),
                         DECIMAL_PLACES)
     assert_almost_equal(combined[1],
                         empyrical.beta(returns, benchmark),
                         DECIMAL_PLACES)
Esempio n. 6
0
 def calculate_max_drawdown(self):
     """Plot cost vs. balance and return (max_drawdown, alpha, beta).

     Returns of the account are taken as ``balance / initial_cost``;
     alpha/beta are computed against ``self.benchmark_returns``.

     Fix: the original did ``plt.grid = (True)``, which rebinds the pyplot
     ``grid`` *function* to the value ``True`` (breaking any later
     ``plt.grid(...)`` call) instead of enabling the grid.  Call it.
     """
     final_balance, cost, balance, initial_cost = self.visual_account()
     returns = balance / initial_cost
     alpha, beta = alpha_beta(returns, self.benchmark_returns)
     maxdrawdown = max_drawdown(returns)
     plt.scatter(initial_cost, balance)
     plt.xlabel('cost history')
     plt.ylabel('balance history')
     plt.grid(True)
     plt.show()
     print("Balance: " + str(final_balance) + " Investment cost: " + str(cost))
     print('max drawdown = ' + str(maxdrawdown) + '; alpha = ' + str(alpha) + '; beta= ' + str(beta) + '.')
     return maxdrawdown, alpha, beta
Esempio n. 7
0
    def alpha_beta(self, index, start=None, end=None):
        """Return ``(alpha, beta)`` of this equity versus ``index``.

        Parameters
        ----------
        index : str
            Identifier of the benchmark equity/index.
        start, end : optional
            Slice labels restricting the period; either may be omitted.

        Fix: the original applied ``data[start:end]`` *after* already
        slicing by ``start``, re-using ``start`` on the shortened frame.
        Slice each bound independently instead.
        """
        index_rets = Equity(index).returns
        rets = self.returns
        data = pd.DataFrame()
        data['Index'] = index_rets
        data['Rets'] = rets
        # Missing observations on either side are treated as zero returns.
        data = data.fillna(0)

        if start:
            data = data[start:]
        if end:
            data = data[:end]

        return empyrical.alpha_beta(data['Rets'], data['Index'])
Esempio n. 8
0
    def __alphaBeta(self):
        """Compute alpha/beta and an information ratio for the strategy
        versus its benchmark, storing results in ``self.__result``.

        NOTE(review): alpha/beta are first estimated by regressing the
        *cumulative* return series with scipy, but those values are then
        overwritten by ``ep.alpha_beta`` on the per-period returns — only
        the empyrical values survive in ``self.__result``.
        """
        # Compute alpha and beta.  X/Y are cumulative return series of the
        # strategy and the benchmark respectively.
        X = self.__return.getCumulativeReturns()
        Y = self.__returnBase.getCumulativeReturns()
        n1 = X.__len__()
        n2 = Y.__len__()
        x = []
        y = []
        # Copy element-wise only when both series are the same length;
        # otherwise x/y stay empty and linregress below would fail.
        if n1 == n2:
            for i in range(n1):
                x.append(X[i])
                y.append(Y[i])
        alpha = 0.0
        beta = 0.0
        # Regress strategy (y) on benchmark (x): slope b -> beta,
        # intercept a -> alpha.
        b, a, r_value, p_value, std_err = stats.linregress(x, y)
        # Annualize alpha assuming 250 trading days per year.
        alpha = [round(a * 250, 3)]
        beta = [round(b, 3)]
        self.__result["alpha"] = alpha
        self.__result["beta"] = beta

        # Attempt to compute the beta value with talib (left disabled).
        #x =  self.__feed.getDataSeries(self.__instrument[0]).getCloseDataSeries()
        #		y = self.__feedBase.getDataSeries(self.__base[0]).getCloseDataSeries()
        #		beta2 = BETA(x, y, 0)
        #		print(beta2)

        # Recompute alpha/beta with empyrical on per-period returns; this
        # overwrites the regression-based values stored above.
        strategyReturn = self.__return.getReturns()
        baseReturn = self.__returnBase.getReturns()
        Returns = []
        baseReturns = []
        for i in range(len(strategyReturn)):
            Returns.append(strategyReturn[i])
            baseReturns.append(baseReturn[i])
        returns = np.array(Returns)
        basereturns = np.array(baseReturns)
        print(returns)
        print(basereturns)
        alpha, beta = ep.alpha_beta(returns, basereturns)
        self.__result["alpha"] = alpha
        self.__result["beta"] = beta
        print(alpha, beta)

        # Information ratio: final excess cumulative return over the
        # benchmark divided by the std-dev of per-period excess returns.
        # (Dict key kept in the original language: it is a runtime key.)
        ex_return = [x[i] - y[i] for i in range(len(x))]
        information = (x[-1] - y[-1]) / np.std(ex_return)
        self.__result["信息比率"] = information
Esempio n. 9
0
    def test_alpha_beta_equality(self, returns, benchmark):
        """``alpha_beta`` agrees with separate ``alpha``/``beta`` calls and
        with a direct scipy linear regression.

        Fix: the original passed ``linregress(returns, benchmark)``, which
        regresses the benchmark on the returns.  Beta is the slope of the
        strategy's returns regressed ON the benchmark, so the benchmark
        array must be the x argument.
        """
        alpha_beta = empyrical.alpha_beta(returns, benchmark)
        assert_almost_equal(alpha_beta[0], empyrical.alpha(returns, benchmark),
                            DECIMAL_PLACES)
        assert_almost_equal(alpha_beta[1], empyrical.beta(returns, benchmark),
                            DECIMAL_PLACES)

        if len(returns) == len(benchmark):
            # Compare to scipy linregress, ignoring positions where either
            # series is NaN.
            returns_arr = returns.values
            benchmark_arr = benchmark.values
            mask = ~np.isnan(returns_arr) & ~np.isnan(benchmark_arr)
            slope, intercept, _, _, _ = stats.linregress(
                benchmark_arr[mask], returns_arr[mask])

            assert_almost_equal(alpha_beta[0], intercept)
            assert_almost_equal(alpha_beta[1], slope)
Esempio n. 10
0
def performance(ret, benchmark, rf=0.04):
    """Compute common evaluation metrics for a daily return series.

    Parameters
    ----------
    ret : pd.Series
        Daily strategy returns, non-cumulative.
    benchmark : pd.Series
        Daily benchmark returns, same style as ``ret``.
    rf : float, optional
        Annual risk-free rate (default 0.04); de-annualized to a daily
        rate for the Sharpe ratio.

    Returns
    -------
    dict
        total_return, annual_return, max_drawdown, sharpe_ratio, alpha,
        beta.
    """
    import empyrical  # local import, matching the original module layout

    daily_rf = (1 + rf) ** (1 / 252) - 1  # de-annualize over 252 days
    alpha, beta = empyrical.alpha_beta(ret, benchmark)
    return {
        'total_return': empyrical.cum_returns_final(ret),
        'annual_return': empyrical.annual_return(ret),
        'max_drawdown': empyrical.max_drawdown(ret),
        'sharpe_ratio': empyrical.sharpe_ratio(ret, risk_free=daily_rf),
        'alpha': alpha,
        'beta': beta
    }
Esempio n. 11
0
def alpha_beta(returns, factor_returns):
    """Compute alpha and beta of a strategy relative to a benchmark factor.

    Thin delegation to :func:`empyrical.alpha_beta`.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    factor_returns : pd.Series
        Daily noncumulative returns of the benchmark factor to which betas
        are computed, in the same style as ``returns``.

    Returns
    -------
    float
        Alpha.
    float
        Beta.
    """
    return ep.alpha_beta(returns=returns, factor_returns=factor_returns)
Esempio n. 12
0
def alpha_beta(returns, factor_returns):
    """Compute alpha and beta of a strategy against a factor.

    Thin delegation to :func:`empyrical.alpha_beta`.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
    factor_returns : pd.Series
        Daily noncumulative returns of the factor to which beta is
        computed — usually a benchmark such as the market — in the same
        style as ``returns``.

    Returns
    -------
    float
        Alpha.
    float
        Beta.
    """
    return empyrical.alpha_beta(returns=returns, factor_returns=factor_returns)
Esempio n. 13
0
def getFundData():
    """Build dashboard-ready fund statistics for the historical and the
    realized allocation views.

    Returns ``(historicalData, realizedData)`` — two dicts of JSON-encoded
    tables plus scalar metrics (sharpe, annualReturn, annualVol, per-ticker
    alpha/beta), or ``(None, None)`` when no allocations exist.
    """
    historicalAllocations, realizedAllocations = getNetAllocationAcrossPortfolios(
    )
    if historicalAllocations is None:
        return None, None
    # Pull price data for every ticker in the allocation and join into one
    # frame used for all performance computations below.
    pulledData, unused_ = dataAck.downloadTickerData(
        historicalAllocations.columns.values)
    allocationJoinedData = dataAck.joinDatasets(
        [pulledData[ticker] for ticker in pulledData])
    dataToCache = []
    # Process both views the same way: index 0 = historical, 1 = realized.
    for allocationForm in [historicalAllocations, realizedAllocations]:
        performanceByTicker, fundPerformance, fundTransactionCost = portfolioGeneration.calculatePerformanceForAllocations(
            allocationForm, allocationJoinedData)
        # No performance data for this view -> cache an empty dict.
        if len(fundPerformance) == 0:
            dataToCache.append({})
            continue

        ##CALCULATE BETAS FOR ALL TICKERS TO FUND PERFORMANCE
        tickerAlphaBetas = []
        for ticker in allocationForm.columns.values:
            factorReturn = dataAck.getDailyFactorReturn(
                ticker, allocationJoinedData)
            alpha, beta = empyrical.alpha_beta(fundPerformance, factorReturn)
            # Alpha is scaled to percent for display; beta is kept raw.
            tickerAlphaBetas.append({
                "ticker": ticker,
                "alpha": alpha * 100,
                "beta": beta
            })

        # Convert the cumulative-return and allocation tables to
        # (columns, rows) pairs for JSON serialization.
        tickerCols, tickerRows = portfolioGeneration.convertTableToJSON(
            empyrical.cum_returns(performanceByTicker))
        tickerAllocationsCols, tickerAllocationsRows = portfolioGeneration.convertTableToJSON(
            allocationForm)
        fundCols, fundRows = portfolioGeneration.convertTableToJSON(
            empyrical.cum_returns(fundPerformance))

        sharpe = empyrical.sharpe_ratio(fundPerformance)
        # NOTE(review): the [0] suggests annual_return returns a one-element
        # container here (fundPerformance is presumably a DataFrame) —
        # confirm against calculatePerformanceForAllocations.
        annualReturn = empyrical.annual_return(fundPerformance)[0]
        annualVol = empyrical.annual_volatility(fundPerformance)

        commissionCols, commissionRows = portfolioGeneration.convertTableToJSON(
            fundTransactionCost)

        # Scalar percents are scaled *100 for display; tables are JSON
        # strings so the caller can embed them directly.
        dataToCache.append({
            "tickerAlphaBetas":
            tickerAlphaBetas,
            "tickerCols":
            json.dumps(tickerCols),
            "tickerRows":
            json.dumps(tickerRows),
            "tickerAllocationsCols":
            json.dumps(tickerAllocationsCols),
            "tickerAllocationsRows":
            json.dumps(tickerAllocationsRows),
            "fundCols":
            json.dumps(fundCols),
            "fundRows":
            json.dumps(fundRows),
            "sharpe":
            sharpe,
            "annualReturn":
            annualReturn * 100,
            "annualVol":
            annualVol * 100,
            "commissionCols":
            json.dumps(commissionCols),
            "commissionRows":
            json.dumps(commissionRows)
        })

    historicalData = dataToCache[0]
    realizedData = dataToCache[1]
    ##GET TODAY ALLOCATION
    if realizedData != {}:
        # Pair each ticker column with the absolute value of its most
        # recent allocation row.
        newRows = []
        tARows = json.loads(realizedData["tickerAllocationsRows"])
        tACols = json.loads(realizedData["tickerAllocationsCols"])
        print(tARows[-1])
        for i in range(len(tACols)):

            newRows.append([tACols[i],
                            abs(tARows[-1][i + 1])])  ##i+1 because date
        realizedData["todayAllocation"] = json.dumps(newRows)
        print(realizedData["todayAllocation"])

    return historicalData, realizedData
Esempio n. 14
0
def analyze(context, perf):
    """Post-backtest hook: print empyrical summary stats for ``perf``,
    dump results/orders to CSV, and render a four-panel matplotlib view
    (portfolio value, price + EMAs with trade markers, percent change,
    cash).

    Parameters
    ----------
    context : algorithm context providing ``exchanges`` and ``asset``.
    perf : pd.DataFrame of per-period performance produced by the backtest.
    """
    #print("perf.max_drawdown=", perf.max_drawdown)
    # NOTE(review): these stats are computed on *cumulative* period returns
    # (algorithm_period_return), not per-period returns — confirm intent.
    empyrical_max_drawdown = max_drawdown(perf.algorithm_period_return)
    print("empyrical_max_drawdown = ", empyrical_max_drawdown)

    empyrical_tail_ratio = tail_ratio(perf.algorithm_period_return)
    print("empyrical_tail_ratio = ", empyrical_tail_ratio)

    empyrical_sharpe_ratio = sharpe_ratio(perf.algorithm_period_return)
    print("empyrical_sharpe_ratio = ", empyrical_sharpe_ratio)

    empyrical_alpha_beta = alpha_beta(perf.algorithm_period_return,
                                      perf.benchmark_period_return)
    print("empyrical_alpha_beta = ", empyrical_alpha_beta)

    #cum_returns(perf)
    # Save results in CSV file
    filename = "csvoutput"
    perf.to_csv(filename + '.csv')

    filename_orders = "orders_output"

    # Keep only rows that actually contain orders.
    perf0 = perf[['orders']]
    perf1 = perf0[perf0['orders'].apply(len) > 0]

    perf2 = pd.DataFrame(perf1['orders'])
    #perf2 = pd.DataFrame([x for x in perf1['orders']])
    #print(perf1[['orders',-1]].head(n=5))
    #convert list of dictionaries to dictionary
    # Each cell holds a list of order dicts; take the first order per row.
    perf2["ordersd"] = pd.Series(perf2["orders"].str[0])
    print(perf2["ordersd"].head(70))

    #extracting orders dictionary to multiple columns
    perf2 = pd.DataFrame([x for x in perf2['ordersd']])

    #unpack(perf2, 'ordersd')
    perf2.to_csv(filename_orders + '.csv')

    # Use the first configured exchange's quote currency for axis labels.
    exchange = list(context.exchanges.values())[0]
    base_currency = exchange.base_currency.upper()

    # Panel 1: portfolio value.
    axl = plt.subplot(411)
    perf.loc[:, ['portfolio_value']].plot(ax=axl)
    axl.legend_.remove()
    axl.set_ylabel('Portfolio Value\n({})'.format(base_currency))
    start, end = axl.get_ylim()
    axl.yaxis.set_ticks(np.arange(start, end, (end - start) / 5))

    # Panel 2: price with short/long EMAs, sharing the x axis.
    ax2 = plt.subplot(412, sharex=axl)
    perf.loc[:, ['price', 'short_ema', 'long_ema']].plot(ax=ax2, label='Price')
    ax2.legend_.remove()
    ax2.set_ylabel('{asset}\n({base})'.format(asset=context.asset.symbol,
                                              base=base_currency))
    start, end = ax2.get_ylim()
    ax2.yaxis.set_ticks(np.arange(start, end, (end - start) / 5))

    # Overlay buy (green ^) and sell (red v) markers at transaction times.
    transaction_df = extract_transactions(perf)
    if not transaction_df.empty:
        buy_df = transaction_df[transaction_df['amount'] > 0]
        sell_df = transaction_df[transaction_df['amount'] < 0]
        ax2.scatter(buy_df.index.to_pydatetime(),
                    perf.loc[buy_df.index, 'price'],
                    marker='^',
                    s=100,
                    c='green',
                    label='')
        ax2.scatter(sell_df.index.to_pydatetime(),
                    perf.loc[sell_df.index, 'price'],
                    marker='v',
                    s=100,
                    c='red',
                    label='')

    # Panel 3: algorithm return vs. price change.
    ax3 = plt.subplot(413, sharex=axl)
    perf.loc[:, ['algorithm_period_return', 'price_change']].plot(ax=ax3)
    ax3.legend_.remove()
    ax3.set_ylabel('Percent Change')
    start, end = ax3.get_ylim()
    ax3.yaxis.set_ticks(np.arange(0, end, (end - start) / 5))

    # Panel 4: available cash.
    ax4 = plt.subplot(414, sharex=axl)
    perf.cash.plot(ax=ax4)
    ax4.set_ylabel('Cash\n({})'.format(base_currency))
    start, end = ax4.get_ylim()
    ax4.yaxis.set_ticks(np.arange(0, end, end / 5))

    plt.show()
Esempio n. 15
0
    def describer(self):
        """Summarize a finished backtest: compute empyrical metrics on
        daily-aggregated returns, log a textual report, persist the trade
        record / description / return-curve plot to a per-run directory,
        and (for strong runs) emit a rich HTML visualization.
        """
        tot_cnt = len(self.df)

        # Classify bars by signal correctness:
        #   missed  — price moved but signal was flat (0),
        #   wrong   — signal pointed opposite to the realized move,
        #   jackpot — signal matched the sign of the realized move.
        missed = self.df[
            (np.sign(self.df['return_close']) != self.df['signal'])
            & (self.df['signal'] == 0)]['strat_return'].describe()
        wrong = self.df[
            (np.sign(self.df['return_close']) == self.df['signal'] *
             (-1)) & (self.df['signal'] != 0)]['strat_return'].describe()
        jackpot = self.df[(np.sign(
            self.df['return_close']) == self.df['signal']
                           )]['strat_return'].describe()

        desc = pd.DataFrame([jackpot, wrong, missed],
                            index=['jackpot', 'wrong',
                                   'missed']).T.to_markdown()

        trans_fee_tot = self.df['transfee'].sum()

        # Aggregate intraday bars into daily strategy/benchmark returns so
        # the empyrical metrics below can use period='daily'.
        return_ret, return_bench = {}, {}
        self.df['date'] = self.df.index.date
        for (k, v) in self.df.groupby('date'):
            return_ret[k] = v['strat_return'].sum()
            return_bench[k] = v['bench_return'].sum()
        rret = pd.Series(list(return_ret.values()),
                         index=list(return_ret.keys()),
                         name='rret')
        rbench = pd.Series(list(return_bench.values()),
                           index=list(return_bench.keys()),
                           name='rbench')
        (alpha, beta) = alpha_beta(rret, rbench, period='daily')
        sharpe = sharpe_ratio(rret, period='daily')
        max_down = max_drawdown(rret)
        ann_return_strat = annual_return(rret, period='daily')
        ann_return_bench = annual_return(rbench, period='daily')
        t_r = tail_ratio(rret)

        # Human-readable report sections (percentages scaled *100).
        returns = f"Strategy Return: {round(self.pnl * 100, 2)}% | " \
                  f"Strategy Annualized Return: {round(ann_return_strat * 100, 2)}%. \n" \
                  f"BenchMark return: {round(self.df['bench_return'].sum() * 100, 2)}% | " \
                  f"BenchMark Annualized Return: {round(ann_return_bench * 100, 2)}%.\n"

        desc_ = f"Strategy: {self.func} \n" \
                f"Transaction Fee Percentage: {self.trans_fee_pctg}\n" \
                f"Intraday Closing Time: {self.trade_flag}\n" \
                f"Params: {self.signal_params}\n" \
                f"Test Period: {self.start_dt} - {self.end_dt}\n" \
                f"-- {self.id} --\n" \
                f"-- {self.timeframe} -- \n" \
                f"-- Position: {self.pos_} --\n" \
                f"-- Barly Stoploss: {self.stop_loss} --\n" \
                f"-- Action on Sig0: {self.action_on_0} --\n" \
                f"-- Signal Shift: {self.sig_shift} --\n" \
                f"Transaction Fee Total: {round(trans_fee_tot * 100, 2)}%\n" \
                f"Signal Ratio: {round(self.signal_ratio * 100, 2)}%\n" \
                f"Open Position: {self.open_t} times; Close Position: {self.close_t} times\n" \
                f"Sharpe Ratio: {round(sharpe, 2)} \n" \
                f"Tail Ratio: {round(t_r, 2)}\n" \
                f"Alpha: {round(alpha * 100, 2)}% | Beta: {round(beta * 100, 2)}% \n" \
                f"Max Drawdown: {round(max_down * 100, 2)}% \n" \
                f"Max Daily Drawdown: {round(rret.min() * 100, 2)}% \n" \
                f"Total Win: {self.winner} | Total Loss: {self.loser} | " \
                f"W/L Ratio: {round(self.winner / self.loser, 2) if self.loser != 0 else 0}\n"

        # Capture the source of the factor functions for the archive.
        source_code = "\n\n".join([inspect.getsource(f) for f in self.func
                                   ]) if self.func is not None else " "
        source_code_neut = "\n\n".join(
            [inspect.getsource(f)
             for f in self.neut_func]) if self.neut_func is not None else " "

        t_stamp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        logger.info(f"-- {t_stamp} --\n{desc_}{returns}\n")

        # Every backtest is worth recording: one directory per run.
        path_ = os.path.join(
            fp, f"../../docs/backtest/"
            f"{self.id}-{self.timeframe}-Sharpe{round(sharpe, 2)}-{datetime.now().strftime('%y%m%d-%H:%M:%S')}/"
        )
        os.mkdir(path_)

        plot = self.df[['close', 'strat_ret_cumsum',
                        'bench_ret_cumsum']].plot(figsize=(16, 9),
                                                  secondary_y='close')
        fig = plot.get_figure()
        fig_path = os.path.join(path_, f"return_curve.png")
        fig.savefig(fig_path)

        rec_path = os.path.join(path_, "trade_record.csv")
        self.df.to_csv(rec_path)

        desc_path = os.path.join(path_, "desc.txt")
        with open(desc_path, mode="w+", encoding='utf8') as f:
            f.write(
                desc_ + returns + '\n' + f'\nTotal Bars: {tot_cnt} \n' +
                '\nStatistics Desc: \n' + desc +
                '\n* NOTE: THIS DESCRIPTION DIFFERS FROM W/L RATIO ABOVE '
                'BECAUSE ONLY SIGNAL DIRECTION CORRECTNESS IS CONSIDERED HERE.\n'
                + '\n\nBias_factors: \n' + source_code + '\nNeut_factors: \n' +
                source_code_neut)

        # Only high-Sharpe, low-drawdown backtests earn the rich
        # visualization step.
        if (sharpe > 1.5) & (max_down >= -0.2 * self.pos_):
            rich_visual_path = os.path.join(path_, "rich_visual.html")
            kline = rv.draw_kline_with_yield_and_signal(self.df)
            scatters_fr = rv.draw_factor_return_eval(self.bias_factor_df)
            scatters_ff = rv.draw_factor_eval(self.bias_factor_df)
            res_charts = [kline, *scatters_fr, *scatters_ff]
            if self.neut_factor_df is not None:
                sca_neut_fr = rv.draw_factor_return_eval(self.neut_factor_df)
                sca_neut_ff = rv.draw_factor_eval(self.neut_factor_df)
                res_charts += [*sca_neut_fr, *sca_neut_ff]
            rv.form_page(res_charts, rich_visual_path)
Esempio n. 16
0
 def test_alpha_beta_with_nan_inputs(self, returns, benchmark):
     """Alpha and beta must be computable (non-NaN) despite NaNs in input.

     Fix: the original used ``assertNotEqual(alpha, np.nan)``, which is
     always true because NaN compares unequal to everything — including
     itself — so the test could never fail.  Check with ``np.isnan``.
     """
     alpha, beta = empyrical.alpha_beta(returns, benchmark)
     self.assertFalse(np.isnan(alpha))
     self.assertFalse(np.isnan(beta))
            ann_return, cum_return_list[-1], max_drawdown_ratio, sharp))

    returns = pd.Series(index=pd.date_range('2017-03-10', '2017-03-19'),
                        data=(-0.012143, 0.045350, 0.030957, 0.004902,
                              0.002341, -0.02103, 0.00148, 0.004820, -0.00023,
                              0.01201))
    benchmark_returns = pd.Series(index=pd.date_range('2017-03-10',
                                                      '2017-03-19'),
                                  data=(-0.031940, 0.025350, -0.020957,
                                        -0.000902, 0.007341, -0.01103, 0.00248,
                                        0.008820, -0.00123, 0.01091))
    alpha_return = alpha(returns=returns,
                         factor_returns=benchmark_returns,
                         risk_free=0.01)
    beta_return = beta(returns=returns,
                       factor_returns=benchmark_returns,
                       risk_free=0.01)
    print("alpha_return", alpha_return)
    print("\nbeta_return", beta_return)
    ###############
    import numpy as np
    from empyrical import alpha_beta
    returns = np.array([.01, .02, .03, -.4, -.06, -.02])
    benchmark_returns = np.array([.02, .02, .03, -.35, -.05, -.01])
    # calculate the max drawdown
    max_drawdown(returns)
    # calculate alpha and beta
    alpha, beta = alpha_beta(returns, benchmark_returns)
    print("*********")
    print(alpha, beta)
def getDataForPortfolio(portfolioKey, factorToTrade, joinedData,
                        availableStartDate):
    modelHashes = portfolio.getPortfolioModels(portfolioKey)
    models = getModelsByKey(modelHashes)
    for model in models:
        print(model.describe())
    ##GENERATE RETURNS FOR PORTFOLIO
    portfolioAllocations = portfolio.getPortfolioAllocations(portfolioKey)

    predsTable = pd.DataFrame([])
    weightsTable = pd.DataFrame([])
    tickerAllocationsTable = pd.DataFrame([])
    scaledTickerAllocationsTable = pd.DataFrame([])
    for allocation in portfolioAllocations:
        colsAlgo = []
        valsAlgo = []
        colsAlgoWeight = []
        valsAlgoWeight = []
        colsTicker = []
        valsTicker = []
        colsTickerScaled = []
        valsTickerScaled = []

        for key in allocation:
            if key.startswith("ticker_"):
                colsTicker.append(key[len("ticker_"):])
                valsTicker.append(allocation[key])
            if key.startswith("scaled_ticker_"):
                colsTickerScaled.append(key[len("scaled_ticker_"):])
                valsTickerScaled.append(
                    abs(allocation[key]) if np.isnan(allocation[key]) ==
                    False else 0.0)
            if key.startswith("algo_") and not key.startswith("algo_weight_"):
                colsAlgo.append(key[len("algo_"):])
                valsAlgo.append(allocation[key])
            if key.startswith("algo_weight_"):
                colsAlgoWeight.append(key[len("algo_weight_"):])
                valsAlgoWeight.append(allocation[key])

        predsTable = pd.concat([
            predsTable,
            pd.DataFrame([valsAlgo],
                         index=[allocation["predictionDay"]],
                         columns=colsAlgo).tz_localize(None)
        ])
        weightsTable = pd.concat([
            weightsTable,
            pd.DataFrame([valsAlgoWeight],
                         index=[allocation["predictionDay"]],
                         columns=colsAlgoWeight).tz_localize(None)
        ])
        tickerAllocationsTable = pd.concat([
            tickerAllocationsTable,
            pd.DataFrame([valsTicker],
                         index=[allocation["predictionDay"]],
                         columns=colsTicker).tz_localize(None)
        ])
        scaledTickerAllocationsTable = pd.concat([
            scaledTickerAllocationsTable,
            pd.DataFrame([valsTickerScaled],
                         index=[allocation["predictionDay"]],
                         columns=colsTickerScaled).tz_localize(None)
        ])

    predsTable = predsTable.sort_index()
    weightsTable = weightsTable.sort_index().fillna(0)
    tickerAllocationsTable = tickerAllocationsTable.sort_index().fillna(0)
    scaledTickerAllocationsTable = scaledTickerAllocationsTable.sort_index(
    ).fillna(0)

    rawTickerPerformance = portfolioGeneration.calculatePerformanceForTable(
        tickerAllocationsTable, tickerAllocationsTable.columns, joinedData)

    rawAlgoPerformance = pd.DataFrame(
        rawTickerPerformance.apply(lambda x: sum(x), axis=1),
        columns=["Algo Return Without Commissions"])

    tickerPerformance, algoPerformance, algoTransactionCost = portfolioGeneration.calculatePerformanceForAllocations(
        tickerAllocationsTable, joinedData)

    benchmark = portfolio.getPortfolioByKey(portfolioKey)["benchmark"]
    factorReturn = dataAck.getDailyFactorReturn(benchmark, joinedData)
    factorReturn.columns = ["Factor Return (" + benchmark + ")"]
    algoPerformance.columns = ["Algo Return"]
    algoVsBenchmark = factorReturn.join(algoPerformance).fillna(0)
    algoVsBenchmark = algoVsBenchmark.join(rawAlgoPerformance).dropna()

    tickerAlphaBetas = []
    for ticker in tickerAllocationsTable.columns.values:
        thisFactorReturn = dataAck.getDailyFactorReturn(ticker, joinedData)
        alpha, beta = empyrical.alpha_beta(algoPerformance, thisFactorReturn)
        tickerAlphaBetas.append({
            "ticker": ticker,
            "alpha": alpha * 100,
            "beta": beta
        })

    ##GET SCALED PERFORMANCE [FULL CAPITAL USED EACH DAY]
    rawTickerPerformanceScaled = portfolioGeneration.calculatePerformanceForTable(
        scaledTickerAllocationsTable, scaledTickerAllocationsTable.columns,
        joinedData)

    rawAlgoPerformanceScaled = pd.DataFrame(
        rawTickerPerformanceScaled.apply(lambda x: sum(x), axis=1),
        columns=["Algo Return Without Commissions"])

    unused, algoPerformanceScaled, algoTransactionCostScaled = portfolioGeneration.calculatePerformanceForAllocations(
        scaledTickerAllocationsTable, joinedData)

    algoPerformanceScaled.columns = ["Algo Return"]
    algoVsBenchmarkScaled = factorReturn.join(algoPerformanceScaled).fillna(0)
    algoVsBenchmarkScaled = algoVsBenchmarkScaled.join(
        rawAlgoPerformanceScaled).dropna()

    ##FORM HASH TO TICKER
    hashToTicker = {}
    for model in models:
        hashToTicker[model.getHash()] = model.targetTicker
    print(hashToTicker)

    individualAlgoPerformance = portfolioGeneration.calculatePerformanceForTable(
        predsTable,
        [hashToTicker[modelHash] for modelHash in predsTable.columns],
        joinedData)

    ##CONVERT TO USABLE OBJECTS
    tickerCols, tickerRows = portfolioGeneration.convertTableToJSON(
        empyrical.cum_returns(tickerPerformance))
    tickerAllocationsCols, tickerAllocationsRows = portfolioGeneration.convertTableToJSON(
        tickerAllocationsTable[-10:])
    algoCols, algoRows = portfolioGeneration.convertTableToJSON(
        empyrical.cum_returns(algoPerformance))
    algoVsBenchmarkCols, algoVsBenchmarkRows = portfolioGeneration.convertTableToJSON(
        empyrical.cum_returns(algoVsBenchmark))
    individualAlgoPerformanceCols, individualAlgoPerformanceRows = portfolioGeneration.convertTableToJSON(
        empyrical.cum_returns(individualAlgoPerformance))
    scaledAllocationCols, scaledAllocationRows = portfolioGeneration.convertTableToJSON(
        scaledTickerAllocationsTable)
    weightsCols, weightsRows = portfolioGeneration.convertTableToJSON(
        weightsTable)
    alpha, beta = empyrical.alpha_beta(algoPerformance, factorReturn)
    recentAlpha, recentBeta = empyrical.alpha_beta(algoPerformance[-100:],
                                                   factorReturn[-100:])
    recentSharpe = empyrical.sharpe_ratio(algoPerformance[-100:])
    recentReturn = empyrical.cum_returns(
        algoPerformance[-100:]).values[-1][0] * 100
    algoVsBenchmarkColsRecent, algoVsBenchmarkRowsRecent = portfolioGeneration.convertTableToJSON(
        empyrical.cum_returns(algoVsBenchmark[-100:]))
    commissionCols, commissionRows = portfolioGeneration.convertTableToJSON(
        algoTransactionCost)

    algoVsBenchmarkScaledCols, algoVsBenchmarkScaledRows = portfolioGeneration.convertTableToJSON(
        empyrical.cum_returns(algoVsBenchmarkScaled))
    commissionScaledCols, commissionScaledRows = portfolioGeneration.convertTableToJSON(
        algoTransactionCostScaled)
    scaledSharpe = empyrical.sharpe_ratio(algoPerformanceScaled)
    scaledReturn = empyrical.annual_return(algoPerformanceScaled)[0] * 100
    scaledVolatility = empyrical.annual_volatility(algoPerformanceScaled) * 100
    scaledAlpha, scaledBeta = empyrical.alpha_beta(algoPerformanceScaled,
                                                   factorReturn)

    algoVsBenchmarkScaledColsRecent, algoVsBenchmarkScaledRowsRecent = portfolioGeneration.convertTableToJSON(
        empyrical.cum_returns(algoVsBenchmarkScaled[-100:]))
    scaledSharpeRecent = empyrical.sharpe_ratio(algoPerformanceScaled[-100:])
    scaledReturnRecent = empyrical.annual_return(
        algoPerformanceScaled[-100:])[0] * 100
    scaledVolatilityRecent = empyrical.annual_volatility(
        algoPerformanceScaled[-100:]) * 100
    scaledAlphaRecent, scaledBetaRecent = empyrical.alpha_beta(
        algoPerformanceScaled[-100:], factorReturn[-100:])

    if len(algoPerformance[availableStartDate:]) > 0:
        ##NORMAL
        availableAlpha, availableBeta = empyrical.alpha_beta(
            algoPerformance[availableStartDate:],
            factorReturn[availableStartDate:])
        availableAlpha = availableAlpha * 100
        availableSharpe = empyrical.sharpe_ratio(
            algoPerformance[availableStartDate:])
        availableReturn = empyrical.cum_returns(
            algoPerformance[availableStartDate:]).values[-1][0] * 100
        algoVsBenchmarkColsAvailable, algoVsBenchmarkRowsAvailable = portfolioGeneration.convertTableToJSON(
            empyrical.cum_returns(algoVsBenchmark[availableStartDate:]))

        ##SCALED
        availableAlphaScaled, availableBetaScaled = empyrical.alpha_beta(
            algoPerformanceScaled[availableStartDate:],
            factorReturn[availableStartDate:])
        availableAlphaScaled = availableAlphaScaled * 100
        availableSharpeScaled = empyrical.sharpe_ratio(
            algoPerformanceScaled[availableStartDate:])
        availableReturnScaled = empyrical.cum_returns(
            algoPerformanceScaled[availableStartDate:]).values[-1][0] * 100
        algoVsBenchmarkColsAvailableScaled, algoVsBenchmarkRowsAvailableScaled = portfolioGeneration.convertTableToJSON(
            empyrical.cum_returns(algoVsBenchmarkScaled[availableStartDate:]))
    else:
        #NORMAL
        availableAlpha, availableBeta = ("NaN", "NaN")
        availableSharpe = "NaN"
        availableReturn = "NaN"
        algoVsBenchmarkColsAvailable, algoVsBenchmarkRowsAvailable = ([], [])

        #SCALED
        availableAlphaScaled, availableBetaScaled = ("NaN", "NaN")
        availableSharpeScaled = "NaN"
        availableReturnScaled = "NaN"
        algoVsBenchmarkColsAvailableScaled, algoVsBenchmarkRowsAvailableScaled = (
            [], [])

    return {
        "tickerCols":
        json.dumps(tickerCols),
        "tickerRows":
        json.dumps(tickerRows),
        "tickerAllocationsCols":
        json.dumps(tickerAllocationsCols),
        "tickerAllocationsRows":
        json.dumps(tickerAllocationsRows),
        "algoCols":
        json.dumps(algoCols),
        "algoRows":
        json.dumps(algoRows),
        "tickerCols":
        json.dumps(tickerCols),
        "tickerRows":
        json.dumps(tickerRows),
        "algoVsBenchmarkCols":
        json.dumps(algoVsBenchmarkCols),
        "algoVsBenchmarkRows":
        json.dumps(algoVsBenchmarkRows),
        "individualAlgoPerformanceCols":
        json.dumps(individualAlgoPerformanceCols),
        "individualAlgoPerformanceRows":
        json.dumps(individualAlgoPerformanceRows),
        "scaledAllocationCols":
        json.dumps(scaledAllocationCols),
        "scaledAllocationRows":
        json.dumps(scaledAllocationRows),
        "weightsCols":
        json.dumps(weightsCols),
        "weightsRows":
        json.dumps(weightsRows),
        "algoSharpe":
        empyrical.sharpe_ratio(algoPerformance),
        "alpha":
        alpha * 100,
        "beta":
        beta,
        "annualReturn":
        empyrical.annual_return(algoPerformance)[0] * 100,
        "annualVolatility":
        empyrical.annual_volatility(algoPerformance) * 100,
        "recentSharpe":
        recentSharpe,
        "recentReturn":
        recentReturn,
        "recentAlpha":
        recentAlpha * 100,
        "recentBeta":
        recentBeta,
        "algoVsBenchmarkColsRecent":
        json.dumps(algoVsBenchmarkColsRecent),
        "algoVsBenchmarkRowsRecent":
        json.dumps(algoVsBenchmarkRowsRecent),
        "commissionCols":
        json.dumps(commissionCols),
        "commissionRows":
        json.dumps(commissionRows),
        "tickerAlphaBetas":
        tickerAlphaBetas,
        "availableAlpha":
        availableAlpha,
        "availableBeta":
        availableBeta,
        "availableSharpe":
        availableSharpe,
        "availableReturn":
        availableReturn,
        "algoVsBenchmarkColsAvailable":
        json.dumps(algoVsBenchmarkColsAvailable),
        "algoVsBenchmarkRowsAvailable":
        json.dumps(algoVsBenchmarkRowsAvailable),
        "algoVsBenchmarkScaledCols":
        json.dumps(algoVsBenchmarkScaledCols),
        "algoVsBenchmarkScaledRows":
        json.dumps(algoVsBenchmarkScaledRows),
        "commissionScaledCols":
        json.dumps(commissionScaledCols),
        "commissionScaledRows":
        json.dumps(commissionScaledRows),
        "scaledReturn":
        scaledReturn,
        "scaledSharpe":
        scaledSharpe,
        "scaledVolatility":
        scaledVolatility,
        "scaledAlpha":
        scaledAlpha * 100,
        "scaledBeta":
        scaledBeta,
        "algoVsBenchmarkScaledColsRecent":
        json.dumps(algoVsBenchmarkScaledColsRecent),
        "algoVsBenchmarkScaledRowsRecent":
        json.dumps(algoVsBenchmarkScaledRowsRecent),
        "scaledReturnRecent":
        scaledReturnRecent,
        "scaledVolatilityRecent":
        scaledVolatilityRecent,
        "scaledAlphaRecent":
        scaledAlphaRecent * 100,
        "scaledBetaRecent":
        scaledBetaRecent,
        "scaledSharpeRecent":
        scaledSharpeRecent,
        "availableAlphaScaled":
        availableAlphaScaled,
        "availableBetaScaled":
        availableBetaScaled,
        "availableSharpeScaled":
        availableSharpeScaled,
        "availableReturnScaled":
        availableReturnScaled,
        "algoVsBenchmarkColsAvailableScaled":
        json.dumps(algoVsBenchmarkColsAvailableScaled),
        "algoVsBenchmarkRowsAvailableScaled":
        json.dumps(algoVsBenchmarkRowsAvailableScaled),
    }
def getLimitedDataForPortfolio(historicalWeights, historicalPredictions, modelsUsed, factorToTrade, joinedData):
    """Score a candidate portfolio against a single benchmark factor.

    Builds scaled ticker allocations from the historical model weights and
    predictions, simulates the resulting performance (slippage already
    included by the performance calculator), and summarizes it with
    alpha/beta versus the traded factor plus a set of risk statistics.

    Returns a tuple ``(statsDict, tickerAllocationsTable)``.
    """
    normalAllocations, scaledAllocations = historicalWeightsToTickerAllocations(
        historicalWeights, historicalPredictions, modelsUsed)

    # Trade the scaled allocations; missing entries mean "no position".
    tickerAllocationsTable = scaledAllocations
    tickerAllocationsTable = tickerAllocationsTable.fillna(0)

    tickerPerformance, algoPerformance, algoTransactionCost = \
        portfolioGeneration.calculatePerformanceForAllocations(tickerAllocationsTable, joinedData)

    benchmark = factorToTrade
    factorReturn = dataAck.getDailyFactorReturn(benchmark, joinedData)
    factorReturn.columns = ["Factor Return (" + benchmark + ")"]
    algoPerformance.columns = ["Algo Return"]

    def _rollingCumReturn(window, label):
        # Cumulative return over each trailing `window`-day span.
        rolled = algoPerformance.rolling(window, min_periods=window).apply(
            lambda x: empyrical.cum_returns(x)[-1]).dropna()
        rolled.columns = [label]
        return rolled

    algoPerformanceRollingWeekly = _rollingCumReturn(5, "Weekly Rolling Performance")
    algoPerformanceRollingMonthly = _rollingCumReturn(22, "Monthly Rolling Performance")
    algoPerformanceRollingYearly = _rollingCumReturn(252, "Yearly Rolling Performance")

    tickersUsed = [mod.targetTicker for mod in modelsUsed]

    alpha, beta = empyrical.alpha_beta(algoPerformance, factorReturn)
    sharpe_difference = empyrical.sharpe_ratio(algoPerformance) - empyrical.sharpe_ratio(factorReturn)
    annualizedReturn = empyrical.annual_return(algoPerformance)[0]
    annualizedVolatility = empyrical.annual_volatility(algoPerformance)
    stability = empyrical.stability_of_timeseries(algoPerformance)
    perfValues = algoPerformance.values
    # Fraction of days with a strictly positive return.
    profitability = len(perfValues[perfValues > 0]) / len(perfValues)

    rollingSharpe = algoPerformance.rolling(252, min_periods=252).apply(
        lambda x: empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe.columns = ["252 Day Rolling Sharpe"]

    sharpeSeries = rollingSharpe["252 Day Rolling Sharpe"]
    rollingSharpeError = sharpeSeries.std()
    rollingSharpeMinimum = np.percentile(sharpeSeries.values, 1)

    ##AUTOMATICALLY TAKES SLIPPAGE INTO ACCOUNT
    summary = {
        "benchmark": factorToTrade,
        "alpha": alpha,
        "beta": abs(beta),
        "sharpe difference": sharpe_difference,
        "annualizedReturn": annualizedReturn,
        "annualizedVolatility": annualizedVolatility,
        "sharpe": empyrical.sharpe_ratio(algoPerformance),
        "free return": annualizedReturn - annualizedVolatility,
        "stability": stability,
        "profitability": profitability,
        "rollingSharpeError": rollingSharpeError,
        "rollingSharpeMinimum": rollingSharpeMinimum,
        "weeklyMinimum": algoPerformanceRollingWeekly.min().values[0],
        "monthlyMinimum": algoPerformanceRollingMonthly.min().values[0],
        "yearlyMinimum": algoPerformanceRollingYearly.min().values[0]
    }
    return summary, tickerAllocationsTable
Esempio n. 20
0
def get_report(my_portfolio,
               rf=0.0,
               sigma_value=1,
               confidence_value=0.95,
               filename: str = "report.pdf"):
    """Generate a one-shot PDF performance report for *my_portfolio*.

    Computes the portfolio return series (rebalanced if a rebalance schedule
    exists, static weights otherwise), optionally truncates it at the first
    risk-manager breach, then renders headline statistics and a set of
    quantstats charts into *filename*.

    Parameters
    ----------
    my_portfolio : portfolio object exposing ``portfolio``, ``weights``,
        ``benchmark``, ``start_date``/``end_date`` and optionally
        ``rebalance``, ``data`` and ``risk_manager``.
    rf : float
        Risk-free rate forwarded to the Sharpe ratio and alpha/beta.
    sigma_value, confidence_value :
        Parameters forwarded to the value-at-risk calculation.
    filename : str
        Output path of the generated PDF.
    """
    try:
        # Rebalanced portfolio: stitch together the returns of each
        # rebalance window under that window's weights.
        rebalance_schedule = my_portfolio.rebalance

        # Keep only the YYYY-MM-DD part of the column labels.
        columns = []
        for date in rebalance_schedule.columns:
            date = date[0:10]
            columns.append(date)
        rebalance_schedule.columns = columns

        # Window boundaries: portfolio start date plus every rebalance date.
        dates = [my_portfolio.start_date]
        dates = dates + rebalance_schedule.columns.to_list()

        datess = []
        for date in dates:
            date = date[0:10]
            datess.append(date)
        dates = datess

        # Explicit dtype: a bare pd.Series() creates a deprecated
        # object-dtype empty Series.
        returns = pd.Series(dtype="float64")

        for i in range(len(dates) - 1):
            # Weights that apply over the window (dates[i], dates[i+1]].
            weights = rebalance_schedule[str(dates[i + 1])]

            add_returns = get_returns(
                my_portfolio.portfolio,
                weights,
                start_date=dates[i],
                end_date=dates[i + 1],
            )

            # pd.concat replaces Series.append, which was removed in
            # pandas >= 2.0.
            returns = pd.concat([returns, add_returns])

    except AttributeError:
        # No rebalance schedule: static weights, preferring pre-loaded
        # data over a fresh download.
        try:
            returns = get_returns_from_data(my_portfolio.data,
                                            my_portfolio.weights)
        except AttributeError:
            returns = get_returns(
                my_portfolio.portfolio,
                my_portfolio.weights,
                start_date=my_portfolio.start_date,
                end_date=my_portfolio.end_date,
            )

    creturns = (returns + 1).cumprod()

    # Risk manager: truncate the return series at the first date the
    # configured rule is breached. Failures are deliberately best-effort —
    # a missing/invalid rule leaves the series untouched.
    try:
        if list(my_portfolio.risk_manager.keys())[0] == "Stop Loss":

            values = []
            for r in creturns:
                if r <= 1 + my_portfolio.risk_manager["Stop Loss"]:
                    values.append(r)
                else:
                    pass

            try:
                date = creturns[creturns == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]

            except Exception as e:
                pass  # no breach found — keep full history

        if list(my_portfolio.risk_manager.keys())[0] == "Take Profit":

            values = []
            for r in creturns:
                if r >= 1 + my_portfolio.risk_manager["Take Profit"]:
                    values.append(r)
                else:
                    pass

            try:
                date = creturns[creturns == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]

            except Exception as e:
                pass  # no breach found — keep full history

        if list(my_portfolio.risk_manager.keys())[0] == "Max Drawdown":

            drawdown = qs.stats.to_drawdown_series(returns)

            values = []
            for r in drawdown:
                if r <= my_portfolio.risk_manager["Max Drawdown"]:
                    values.append(r)
                else:
                    pass

            try:
                date = drawdown[drawdown == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]

            except Exception as e:
                pass  # no breach found — keep full history

    except Exception as e:
        pass  # no risk manager configured

    # Allocation pie chart.
    fig1, ax1 = plt.subplots()
    fig1.set_size_inches(5, 5)

    # defining colors for the allocation pie
    cs = [
        "#ff9999",
        "#66b3ff",
        "#99ff99",
        "#ffcc99",
        "#f6c9ff",
        "#a6fff6",
        "#fffeb8",
        "#ffe1d4",
        "#cccdff",
        "#fad6ff",
    ]

    # Drop zero-weight positions so they don't clutter the pie.
    wts = copy.deepcopy(my_portfolio.weights)
    port = copy.deepcopy(my_portfolio.portfolio)
    indices = [i for i, x in enumerate(wts) if x == 0.0]

    while 0.0 in wts:
        wts.remove(0.0)

    for i in sorted(indices, reverse=True):
        del port[i]

    ax1.pie(wts, labels=port, autopct="%1.1f%%", shadow=False, colors=cs)
    ax1.axis(
        "equal")  # Equal aspect ratio ensures that pie is drawn as a circle.
    plt.rcParams["font.size"] = 12
    plt.close(fig1)
    fig1.savefig("allocation.png")

    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("arial", "B", 14)
    pdf.image(
        "https://user-images.githubusercontent.com/61618641/120909011-98f8a180-c670-11eb-8844-2d423ba3fa9c.png",
        x=None,
        y=None,
        w=45,
        h=5,
        type="",
        link="https://github.com/ssantoshp/Empyrial",
    )
    pdf.cell(20, 15, f"Report", ln=1)
    pdf.set_font("arial", size=11)
    pdf.image("allocation.png", x=135, y=0, w=70, h=70, type="", link="")
    pdf.cell(20, 7, f"Start date: " + str(my_portfolio.start_date), ln=1)
    pdf.cell(20, 7, f"End date: " + str(my_portfolio.end_date), ln=1)

    benchmark = get_returns(
        my_portfolio.benchmark,
        wts=[1],
        start_date=my_portfolio.start_date,
        end_date=my_portfolio.end_date,
    )

    # Headline statistics, each formatted as a display string.
    CAGR = cagr(returns, period='daily', annualization=None)
    CAGR = str(round(CAGR * 100, 2)) + "%"

    CUM = cum_returns(returns, starting_value=0, out=None) * 100
    CUM = CUM.iloc[-1]
    CUM = CUM.tolist()
    CUM = str(round(CUM, 2)) + "%"

    VOL = qs.stats.volatility(returns, annualize=True)
    VOL = VOL.tolist()
    VOL = str(round(VOL * 100, 2)) + " %"

    SR = qs.stats.sharpe(returns, rf=rf)
    SR = np.round(SR, decimals=2)
    SR = str(SR)

    empyrial.SR = SR

    CR = qs.stats.calmar(returns)
    CR = CR.tolist()
    CR = str(round(CR, 2))

    empyrial.CR = CR

    STABILITY = stability_of_timeseries(returns)
    STABILITY = round(STABILITY, 2)
    STABILITY = str(STABILITY)

    MD = max_drawdown(returns, out=None)
    MD = str(round(MD * 100, 2)) + " %"

    SOR = sortino_ratio(returns, required_return=0, period='daily')
    SOR = round(SOR, 2)
    SOR = str(SOR)

    SK = qs.stats.skew(returns)
    SK = round(SK, 2)
    SK = SK.tolist()
    SK = str(SK)

    KU = qs.stats.kurtosis(returns)
    KU = round(KU, 2)
    KU = KU.tolist()
    KU = str(KU)

    TA = tail_ratio(returns)
    TA = round(TA, 2)
    TA = str(TA)

    CSR = qs.stats.common_sense_ratio(returns)
    CSR = round(CSR, 2)
    CSR = CSR.tolist()
    CSR = str(CSR)

    VAR = qs.stats.value_at_risk(returns,
                                 sigma=sigma_value,
                                 confidence=confidence_value)
    VAR = np.round(VAR, decimals=2)
    VAR = str(VAR * 100) + " %"

    alpha, beta = alpha_beta(returns, benchmark, risk_free=rf)
    AL = round(alpha, 2)
    BTA = round(beta, 2)

    def condition(x):
        # A "winning" day is one with a strictly positive return.
        return x > 0

    win = sum(condition(x) for x in returns)
    total = len(returns)
    win_ratio = win / total
    win_ratio = win_ratio * 100
    win_ratio = round(win_ratio, 2)

    IR = calculate_information_ratio(returns, benchmark.iloc[:, 0])
    IR = round(IR, 2)

    data = {
        "": [
            "Annual return",
            "Cumulative return",
            "Annual volatility",
            "Winning day ratio",
            "Sharpe ratio",
            "Calmar ratio",
            "Information ratio",
            "Stability",
            "Max Drawdown",
            "Sortino ratio",
            "Skew",
            "Kurtosis",
            "Tail Ratio",
            "Common sense ratio",
            "Daily value at risk",
            "Alpha",
            "Beta",
        ],
        "Backtest": [
            CAGR,
            CUM,
            VOL,
            f"{win_ratio}%",
            SR,
            CR,
            IR,
            STABILITY,
            MD,
            SOR,
            SK,
            KU,
            TA,
            CSR,
            VAR,
            AL,
            BTA,
        ],
    }

    # Create DataFrame
    df = pd.DataFrame(data)
    df.set_index("", inplace=True)
    df.style.set_properties(**{
        "background-color": "white",
        "color": "black",
        "border-color": "black"
    })

    empyrial.df = data

    # Daily-return bar chart: blue for non-negative days, grey otherwise.
    y = []
    for x in returns:
        y.append(x)

    arr = np.array(y)
    my_color = np.where(arr >= 0, "blue", "grey")
    ret = plt.figure(figsize=(30, 8))
    plt.vlines(x=returns.index, ymin=0, ymax=arr, color=my_color, alpha=0.4)
    plt.title("Returns")
    plt.close(ret)
    ret.savefig("ret.png")

    pdf.cell(20, 7, f"", ln=1)
    pdf.cell(20, 7, f"Annual return: " + str(CAGR), ln=1)
    pdf.cell(20, 7, f"Cumulative return: " + str(CUM), ln=1)
    pdf.cell(20, 7, f"Annual volatility: " + str(VOL), ln=1)
    pdf.cell(20, 7, f"Winning day ratio: " + str(win_ratio), ln=1)
    pdf.cell(20, 7, f"Sharpe ratio: " + str(SR), ln=1)
    pdf.cell(20, 7, f"Calmar ratio: " + str(CR), ln=1)
    pdf.cell(20, 7, f"Information ratio: " + str(IR), ln=1)
    pdf.cell(20, 7, f"Stability: " + str(STABILITY), ln=1)
    pdf.cell(20, 7, f"Max drawdown: " + str(MD), ln=1)
    pdf.cell(20, 7, f"Sortino ratio: " + str(SOR), ln=1)
    pdf.cell(20, 7, f"Skew: " + str(SK), ln=1)
    pdf.cell(20, 7, f"Kurtosis: " + str(KU), ln=1)
    pdf.cell(20, 7, f"Tail ratio: " + str(TA), ln=1)
    pdf.cell(20, 7, f"Common sense ratio: " + str(CSR), ln=1)
    pdf.cell(20, 7, f"Daily value at risk: " + str(VAR), ln=1)
    pdf.cell(20, 7, f"Alpha: " + str(AL), ln=1)
    pdf.cell(20, 7, f"Beta: " + str(BTA), ln=1)

    # Render the quantstats charts to PNGs, then lay them out in the PDF.
    qs.plots.returns(returns,
                     benchmark,
                     cumulative=True,
                     savefig="retbench.png",
                     show=False)
    qs.plots.yearly_returns(returns,
                            benchmark,
                            savefig="y_returns.png",
                            show=False)
    qs.plots.monthly_heatmap(returns, savefig="heatmap.png", show=False)
    qs.plots.drawdown(returns, savefig="drawdown.png", show=False)
    qs.plots.drawdowns_periods(returns, savefig="d_periods.png", show=False)
    qs.plots.rolling_volatility(returns, savefig="rvol.png", show=False)
    qs.plots.rolling_sharpe(returns, savefig="rsharpe.png", show=False)
    qs.plots.rolling_beta(returns, benchmark, savefig="rbeta.png", show=False)

    pdf.image("ret.png", x=-20, y=None, w=250, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("y_returns.png", x=-20, y=None, w=200, h=100, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("retbench.png", x=None, y=None, w=200, h=100, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("heatmap.png", x=None, y=None, w=200, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("drawdown.png", x=None, y=None, w=200, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("d_periods.png", x=None, y=None, w=200, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("rvol.png", x=None, y=None, w=190, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("rsharpe.png", x=None, y=None, w=190, h=80, type="", link="")
    pdf.cell(20, 7, f"", ln=1)
    pdf.image("rbeta.png", x=None, y=None, w=190, h=80, type="", link="")

    pdf.output(dest="F", name=filename)
Esempio n. 21
0
    def _finalize_backtesting(self):
        """Finalize a backtest run: build the trading frame and compute stats.

        Assembles the per-tick rows collected during the run into
        ``self.trading_df``, records end-of-run cash/crypto, and computes
        max drawdown, Sharpe ratio, per-trade gain/loss arrays and (when a
        benchmark backtest is available) alpha/beta.
        """
        self.trading_df = pd.DataFrame(self.trading_df_rows,
                                       columns=['timestamp', 'close_price', 'signal', 'order', 'cash', 'crypto', 'total_value',
                                                'order_obj', 'signal_obj'])
        self.trading_df = self.trading_df.set_index('timestamp')
        # Rows must arrive in chronological order for the return math below.
        assert self.trading_df.index.is_monotonic_increasing
        # set finishing variable values
        self._end_cash = self._cash
        self._end_crypto = self._crypto

        # compute returns for stats
        self.trading_df = self._fill_returns(self.trading_df)
        returns = np.array(self.trading_df['return_relative_to_past_tick'])
        self._max_drawdown, start_dd, end_dd = self._compute_max_drawdown()
        self._max_drawdown_duration = end_dd - start_dd
        self._sharpe_ratio = empyrical.sharpe_ratio(returns)

        # extract only rows that have orders
        self.orders_df = self.trading_df[self.trading_df['order'] != ""]
        # recalculate returns across consecutive order rows only
        self.orders_df = self._fill_returns(self.orders_df)
        # each SELL row's return closes out one buy/sell pair
        orders_sell_df = self.orders_df[self.orders_df['order'] == "SELL"]
        self._buy_sell_pair_returns = np.array(orders_sell_df['return_relative_to_past_tick'])
        self._buy_sell_pair_gains = self._buy_sell_pair_returns[np.where(self._buy_sell_pair_returns > 0)]
        self._buy_sell_pair_losses = self._buy_sell_pair_returns[np.where(self._buy_sell_pair_returns < 0)]

        self._num_gains = len(self._buy_sell_pair_gains)
        self._num_losses = len(self._buy_sell_pair_losses)

        # if no returns, no gains or no losses, stat functions would fail on
        # empty arrays — substitute a single NaN so they return nan instead
        if len(self._buy_sell_pair_returns) == 0:
            self._buy_sell_pair_returns = np.array([np.nan])

        if len(self._buy_sell_pair_gains) == 0:
            self._buy_sell_pair_gains = np.array([np.nan])

        if len(self._buy_sell_pair_losses) == 0:
            self._buy_sell_pair_losses = np.array([np.nan])

        if self._benchmark_backtest is not None:
            # Use the same attribute the None-guard checks (previously read
            # `self.benchmark_backtest`, inconsistent with the guard above).
            if len(self._benchmark_backtest.noncumulative_returns) != len(self.noncumulative_returns):
                logging.debug('Incompatible noncumulative returns fields of backtester and benchmark! '
                              'Alpha and beta not calculated.')
                self._alpha = None
                self._beta = None
            else:
                self._alpha, self._beta = \
                    empyrical.alpha_beta(self.noncumulative_returns, self._benchmark_backtest.noncumulative_returns)
        else:
            # NOTE(review): this branch uses np.nan while the length-mismatch
            # branch uses None — confirm downstream consumers accept both.
            self._alpha = self._beta = np.nan

        # fill end price
        self._fill_end_price()

        # fill end USDT value
        self._fill_end_usdt_value()

        if self._verbose:
            logging.info(self.get_report())
Esempio n. 22
0
def runstrategy(ticker_list, bench_ticker):
    """Backtest PairTradingStrategy over CSV data feeds and print statistics.

    Loads the benchmark and every ticker in *ticker_list* from local
    GenericCSVData files, runs the strategy through backtrader with the
    CLI-provided parameters, then prints a one-row DataFrame of performance
    statistics (fund vs. benchmark) and, optionally, plots.

    Parameters
    ----------
    ticker_list : list[str]
        Tickers to load as data feeds for the strategy.
    bench_ticker : str
        Ticker used as the benchmark feed.
    """
    args = parse_args()
    print(args)

    # Create a cerebro
    cerebro = bt.Cerebro()

    # Parse the backtest window from the CLI arguments.
    fromdate = datetime.datetime.strptime(args.fromdate, '%Y-%m-%d')
    todate = datetime.datetime.strptime(args.todate, '%Y-%m-%d')

    bench = bt.feeds.GenericCSVData(
        dataname='/Users/joan/PycharmProjects/CSV_DB/IB/' + bench_ticker + '.csv',
        fromdate=fromdate,
        todate=todate,
        nullvalue=0.0,
        dtformat=('%Y%m%d'),
        datetime=1,
        open=2,
        high=3,
        low=4,
        close=5,
        volume=6,
        reverse=False,
        plot=False)

    cerebro.adddata(bench, name=bench_ticker)

    for i in ticker_list:
        print('Loading data: ' + i)

        data = bt.feeds.GenericCSVData(
            dataname='/Users/joan/PycharmProjects/CSV_DB/IB/' + i + '.csv',
            fromdate=fromdate,
            todate=todate,
            nullvalue=0.0,
            dtformat=('%Y%m%d'),
            datetime=1,
            open=2,
            high=3,
            low=4,
            close=5,
            volume=6,
            reverse=False,
            plot=False)

        cerebro.adddata(data, name=i)

    # Add the strategy
    cerebro.addstrategy(PairTradingStrategy,
                        period=args.period,
                        stake=args.stake)

    cerebro.broker.setcash(args.cash)

    # Fixed per-operation commission scheme.
    comminfo = FixedCommisionScheme()
    cerebro.broker.addcommissioninfo(comminfo)

    cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name='sharpe_ratio')
    cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name="ta")
    cerebro.addanalyzer(bt.analyzers.SQN, _name="sqn")
    cerebro.addanalyzer(bt.analyzers.SharpeRatio_A, _name='myysharpe', riskfreerate=args.rf_rate)
    cerebro.addanalyzer(bt.analyzers.PyFolio, _name='mypyf')
    cerebro.addanalyzer(bt.analyzers.TimeReturn, timeframe=bt.TimeFrame.Days,
                        data=bench, _name='benchreturns')

    cerebro.addobserver(bt.observers.Value)
    cerebro.addobserver(bt.observers.Benchmark, plot=False)
    cerebro.addobserver(bt.observers.DrawDown)

    # And run it
    strat = cerebro.run(runonce=not args.runnext,
                        preload=not args.nopreload,
                        oldsync=args.oldsync
                        )

    # Plot if requested
    if args.plot:
        cerebro.plot(style='candlestick', barup='green', bardown='red', figsize=(100, 100))

    # Daily return series for fund and benchmark.
    bench_returns = strat[0].analyzers.benchreturns.get_analysis()
    bench_df = pd.DataFrame.from_dict(bench_returns, orient='index', columns=['return'])
    return_df = pd.DataFrame.from_dict(strat[0].analyzers.mypyf.get_analysis()['returns'], orient='index',
                                       columns=['return'])

    alpha, beta = empyrical.alpha_beta(return_df, bench_df, risk_free=args.rf_rate)

    dic = {'SQN': printSQN(strat[0].analyzers.sqn.get_analysis()),
           'sharpe': empyrical.sharpe_ratio(return_df, risk_free=args.rf_rate / 252, period='daily')[0],
           'sharpe_bm': empyrical.sharpe_ratio(bench_df, risk_free=args.rf_rate / 252, period='daily')[0],
           # Fix: fund sortino was previously computed on the benchmark series.
           'sortino': empyrical.sortino_ratio(return_df, period='daily')[0],
           'sortino_bm': empyrical.sortino_ratio(bench_df, period='daily')[0],
           'VaR': empyrical.value_at_risk(return_df) * 100,
           'VaR_bm': empyrical.value_at_risk(bench_df) * 100,
           'capture': round(empyrical.capture(return_df, bench_df, period='daily')[0] * 100),
           'max_dd': round(empyrical.max_drawdown(return_df)[0] * 100),
           'max_dd_bm': round(empyrical.max_drawdown(bench_df)[0] * 100),
           'beta': beta,
           # Fix: return/volatility entries were swapped between keys —
           # *_bm keys now use bench_df and vol keys use annual_volatility.
           'return_annual': round(empyrical.annual_return(return_df)[0] * 100, 2),
           'return_annual_bm': round(empyrical.annual_return(bench_df)[0] * 100, 2),
           'vol_annual': round(empyrical.annual_volatility(return_df)[0] * 100, 2),
           'vol_annual_bm': round(empyrical.annual_volatility(bench_df)[0] * 100, 2)}

    df = pd.DataFrame(dic, index=[0])
    print(df)

    def calc_stats(df):
        # Cumulative percentage return derived from daily returns.
        df['perc_ret'] = (1 + df['return']).cumprod() - 1
        return df

    # 30-day rolling volatility for fund and benchmark.
    s = return_df.rolling(30).std()
    b = bench_df.rolling(30).std()

    # Get final portfolio Value
    portvalue = cerebro.broker.getvalue()

    # Print out the final result
    print('Final Portfolio Value: ${}'.format(round(portvalue)), 'PnL: ${}'.format(round(portvalue - args.cash)),
          'PnL: {}%'.format(((portvalue / args.cash) - 1) * 100))

    # Finally plot the end results
    if args.plot:

        fig, axs = plt.subplots(2, sharex=True)
        fig.autofmt_xdate()

        axs[1].plot(s)
        axs[1].plot(b)

        # Fix: this panel shows 30-day rolling std, not drawdown.
        axs[1].set_title('30 Day Rolling Volatility')
        axs[1].legend(['Fund', 'Benchmark'])

        axs[0].set_title('Returns')
        axs[0].plot(calc_stats(return_df)['perc_ret'])
        axs[0].plot(calc_stats(bench_df)['perc_ret'])
        axs[0].legend(['Fund', 'Benchmark'])
        plt.show()
Esempio n. 23
0
    def runModelsChunksSkipMP(self, dataOfInterest, daysToCheck = None):
        """Walk-forward model evaluation with multiprocess day-chunking and
        staged early-exit quality gates.

        Generates prediction windows from *dataOfInterest*, farms day
        identifiers out to worker processes in chunks, and after each stage
        scores the accumulated algorithm return stream against the factor
        return.  A stage that fails its quality thresholds aborts the run
        early and returns diagnostics instead of results.

        Parameters:
            dataOfInterest: input passed to ``walkForward.generateWindows``.
            daysToCheck: optional int; when given, only the most recent
                N day identifiers are evaluated.

        Returns:
            ``(returnStream, factorReturn, predictions,
            slippageAdjustedReturn)`` on success, or
            ``(None, metricsDict, None, None)`` when an early gate fails.
        """
        xVals, yVals, yIndex, xToday = self.walkForward.generateWindows(dataOfInterest)
        # 'fork' start method so workers inherit parent state cheaply.
        mpEngine = mp.get_context('fork')
        with mpEngine.Manager() as manager:
            # Shared mapping filled by workers: day identifier (str) -> prediction.
            returnDict = manager.dict()
            
            identifiersToCheck = []
            
            for i in range(len(xVals) - 44): ##44 is lag...should not overlap with any other predictions or will ruin validity of walkforward optimization
                if i < 600:
                    ##MIN TRAINING
                    continue
                identifiersToCheck.append(str(i))
                
            if daysToCheck is not None:
                identifiersToCheck = identifiersToCheck[-daysToCheck:]


            ##FIRST CHECK FIRST 500 IDENTIFIERS AND THEN IF GOOD CONTINUE
            

            identifierWindows = [identifiersToCheck[:252], identifiersToCheck[252:600], identifiersToCheck[600:900], identifiersToCheck[900:1200], identifiersToCheck[1200:]] ##EXACTLY TWO YEARS
            returnStream = None
            factorReturn = None
            predictions = None
            slippageAdjustedReturn = None
            shortSeen = 0  # stage counter: selects which early-exit gate applies
            for clippedIdentifiers in identifierWindows:
                
                # Spread this stage's identifiers across 16 worker processes.
                splitIdentifiers = np.array_split(np.array(clippedIdentifiers), 16)
                
                
                runningP = []
                k = 0
                for identifiers in splitIdentifiers:
                    p = mpEngine.Process(target=endToEnd.runDayChunking, args=(self, xVals, yVals, identifiers, returnDict,k))
                    p.start()
                    runningP.append(p)
                    
                    k += 1
                    

                # Busy-poll until every worker of this stage has exited.
                while len(runningP) > 0:
                    newP = []
                    for p in runningP:
                        if p.is_alive() == True:
                            newP.append(p)
                        else:
                            p.join()
                    runningP = newP
                    
                
                # Collect predictions alongside realized targets/dates,
                # offset by the 44-day prediction lag.
                preds = []
                actuals = []
                days = []
                for i in clippedIdentifiers:
                    preds.append(returnDict[i])
                    actuals.append(yVals[int(i) + 44])
                    days.append(yIndex[int(i) + 44])

                # Classification quality of the raw predictions for this stage.
                loss = log_loss(np.array(endToEnd.transformTargetArr(np.array(actuals), self.threshold)), np.array(preds))
                roc_auc = roc_auc_score(np.array(endToEnd.transformTargetArr(np.array(actuals), self.threshold)), np.array(preds))
                accuracy = accuracy_score(np.array(endToEnd.transformTargetArr(np.array(actuals), self.threshold)), np.array(preds).round())
                print(loss, roc_auc, accuracy)
                ##CREATE ACCURATE BLENDING ACROSS DAYS
                predsTable = pd.DataFrame(preds, index=days, columns=["Predictions"])
                i = 1
                tablesToJoin = []
                # Join lagged copies of the prediction column so every row sees
                # the full prediction window (predictionPeriod days).
                while i < self.walkForward.predictionPeriod:
                    thisTable = predsTable.shift(i)
                    thisTable.columns = ["Predictions_" + str(i)]
                    tablesToJoin.append(thisTable)
                    i += 1
                predsTable = predsTable.join(tablesToJoin)
                
                # Blend lagged predictions into one position signal, then turn
                # positions into daily algorithm returns via the factor return.
                transformedPreds = pd.DataFrame(predsTable.apply(lambda x:computePosition(x), axis=1), columns=["Predictions"]).dropna()
                dailyFactorReturn = getDailyFactorReturn(self.walkForward.targetTicker, dataOfInterest)
                transformedPreds = transformedPreds.join(dailyFactorReturn).dropna()
                returnStream = pd.DataFrame(transformedPreds.apply(lambda x:x[0] * x[1], axis=1), columns=["Algo Return"]) if returnStream is None else pd.concat([returnStream, pd.DataFrame(transformedPreds.apply(lambda x:x[0] * x[1], axis=1), columns=["Algo Return"])])
                factorReturn = pd.DataFrame(transformedPreds[["Factor Return"]]) if factorReturn is None else pd.concat([factorReturn, pd.DataFrame(transformedPreds[["Factor Return"]])])
                predictions = pd.DataFrame(transformedPreds[["Predictions"]]) if predictions is None else pd.concat([predictions, pd.DataFrame(transformedPreds[["Predictions"]])])

                # Performance diagnostics on everything accumulated so far.
                alpha, beta = empyrical.alpha_beta(returnStream, factorReturn)
                # Beta of the sign-only (binary) series — directional exposure.
                rawBeta = abs(empyrical.alpha_beta(returnStream.apply(lambda x:applyBinary(x), axis=0), factorReturn.apply(lambda x:applyBinary(x), axis=0))[1])
                shortSharpe = empyrical.sharpe_ratio(returnStream)
                activity = np.count_nonzero(returnStream)/float(len(returnStream))
                algoAnnualReturn = empyrical.annual_return(returnStream.values)[0]
                algoVol = empyrical.annual_volatility(returnStream.values)
                factorAnnualReturn = empyrical.annual_return(factorReturn.values)[0]
                factorVol = empyrical.annual_volatility(factorReturn.values)
                treynor = ((empyrical.annual_return(returnStream.values)[0] - empyrical.annual_return(factorReturn.values)[0]) \
                           / abs(empyrical.beta(returnStream, factorReturn)))
                sharpeDiff = empyrical.sharpe_ratio(returnStream) - empyrical.sharpe_ratio(factorReturn)
                # Sign-corrected Sharpe difference relative to the factor.
                relativeSharpe = sharpeDiff / empyrical.sharpe_ratio(factorReturn) * (empyrical.sharpe_ratio(factorReturn)/abs(empyrical.sharpe_ratio(factorReturn)))
                stability = empyrical.stability_of_timeseries(returnStream)

                ##CALCULATE SHARPE WITH SLIPPAGE
                estimatedSlippageLoss = portfolioGeneration.estimateTransactionCost(predictions)
                estimatedSlippageLoss.columns = returnStream.columns
                slippageAdjustedReturn = (returnStream - estimatedSlippageLoss).dropna()
                slippageSharpe = empyrical.sharpe_ratio(slippageAdjustedReturn)
                sharpeDiffSlippage = empyrical.sharpe_ratio(slippageAdjustedReturn) - empyrical.sharpe_ratio(factorReturn)
                relativeSharpeSlippage = sharpeDiffSlippage / empyrical.sharpe_ratio(factorReturn) * (empyrical.sharpe_ratio(factorReturn)/abs(empyrical.sharpe_ratio(factorReturn)))

                # Gate 1 (after first ~252 ids): abort on negative sharpe,
                # high beta, low activity, or poor accuracy.
                if (empyrical.sharpe_ratio(returnStream) < 0.0 or abs(beta) > 0.7 or activity < 0.5 or accuracy < 0.45) and shortSeen == 0:
                    return None, {
                            "sharpe":shortSharpe, ##OVERLOADED IN FAIL
                            "factorSharpe":empyrical.sharpe_ratio(factorReturn),
                            "sharpeSlippage":slippageSharpe,
                            "beta":abs(beta),
                            "alpha":alpha,
                            "activity":activity,
                            "treynor":treynor,
                            "period":"first 252 days",
                            "algoReturn":algoAnnualReturn,
                            "algoVol":algoVol,
                            "factorReturn":factorAnnualReturn,
                            "factorVol":factorVol,
                            "sharpeDiff":sharpeDiff,
                            "relativeSharpe":relativeSharpe,
                            "sharpeDiffSlippage":sharpeDiffSlippage,
                            "relativeSharpeSlippage":relativeSharpeSlippage,
                            "rawBeta":rawBeta,
                            "stability":stability,
                            "loss":loss,
                            "roc_auc":roc_auc,
                            "accuracy":accuracy
                    }, None, None
                
                # Gates 2-4 (after 600/900/1200 ids): stricter thresholds,
                # including slippage-adjusted sharpe and stability.
                elif (((empyrical.sharpe_ratio(returnStream) < 0.25 or slippageSharpe < 0.0) and shortSeen == 1) or ((empyrical.sharpe_ratio(returnStream) < 0.25 or slippageSharpe < 0.0) and (shortSeen == 2 or shortSeen == 3)) or abs(beta) > 0.6 or activity < 0.6 or stability < 0.4  or accuracy < 0.45) and (shortSeen == 1 or shortSeen == 2 or shortSeen == 3):
                    periodName = "first 600 days"
                    if shortSeen == 2:
                        periodName = "first 900 days"
                    elif shortSeen == 3:
                        periodName = "first 1200 days"
                    return None, {
                            "sharpe":shortSharpe, ##OVERLOADED IN FAIL
                            "factorSharpe":empyrical.sharpe_ratio(factorReturn),
                            "sharpeSlippage":slippageSharpe,
                            "alpha":alpha,
                            "beta":abs(beta),
                            "activity":activity,
                            "treynor":treynor,
                            "period":periodName,
                            "algoReturn":algoAnnualReturn,
                            "algoVol":algoVol,
                            "factorReturn":factorAnnualReturn,
                            "factorVol":factorVol,
                            "sharpeDiff":sharpeDiff,
                            "relativeSharpe":relativeSharpe,
                            "sharpeDiffSlippage":sharpeDiffSlippage,
                            "relativeSharpeSlippage":relativeSharpeSlippage,
                            "rawBeta":rawBeta,
                            "stability":stability,
                            "loss":loss,
                            "roc_auc":roc_auc,
                            "accuracy":accuracy
                    }, None, None
                    
                elif shortSeen < 4:
                    print("CONTINUING", "SHARPE:", shortSharpe, "SHARPE DIFF:", sharpeDiff, "RAW BETA:", rawBeta, "TREYNOR:", treynor)
                   
                shortSeen += 1

            return returnStream, factorReturn, predictions, slippageAdjustedReturn
Esempio n. 24
0
def analyze(context, perf):
    """Print empyrical metrics for the finished run, dump the performance
    frame to CSV, and draw a four-panel summary figure (portfolio value,
    price with EMAs and trade markers, percent change, cash)."""
    algo_returns = perf.algorithm_period_return

    md = max_drawdown(algo_returns)
    print("empyrical_max_drawdown = ", md)

    tr = tail_ratio(algo_returns)
    print("empyrical_tail_ratio = ", tr)

    sr = sharpe_ratio(algo_returns)
    print("empyrical_sharpe_ratio = ", sr)

    ab = alpha_beta(algo_returns, perf.benchmark_period_return)
    print("empyrical_alpha_beta = ", ab)

    # Persist the full performance frame for offline inspection.
    filename = "csvoutput"
    perf.to_csv(filename + '.csv')

    exch = list(context.exchanges.values())[0]
    quote = exch.base_currency.upper()

    # Panel 1: portfolio value.
    ax1 = plt.subplot(411)
    perf.loc[:, ['portfolio_value']].plot(ax=ax1)
    ax1.legend_.remove()
    ax1.set_ylabel('Portfolio Value\n({})'.format(quote))
    lo, hi = ax1.get_ylim()
    ax1.yaxis.set_ticks(np.arange(lo, hi, (hi - lo) / 5))

    # Panel 2: price and EMAs, with buy/sell markers.
    ax2 = plt.subplot(412, sharex=ax1)
    perf.loc[:, ['price', 'short_ema', 'long_ema']].plot(ax=ax2, label='Price')
    ax2.legend_.remove()
    ax2.set_ylabel('{asset}\n({base})'.format(asset=context.asset.symbol, base=quote))
    lo, hi = ax2.get_ylim()
    ax2.yaxis.set_ticks(np.arange(lo, hi, (hi - lo) / 5))

    txns = extract_transactions(perf)
    if not txns.empty:
        buys = txns[txns['amount'] > 0]
        sells = txns[txns['amount'] < 0]
        ax2.scatter(buys.index.to_pydatetime(), perf.loc[buys.index, 'price'],
                    marker='^', s=100, c='green', label='')
        ax2.scatter(sells.index.to_pydatetime(), perf.loc[sells.index, 'price'],
                    marker='v', s=100, c='red', label='')

    # Panel 3: algorithm return vs. underlying price change.
    ax3 = plt.subplot(413, sharex=ax1)
    perf.loc[:, ['algorithm_period_return', 'price_change']].plot(ax=ax3)
    ax3.legend_.remove()
    ax3.set_ylabel('Percent Change')
    lo, hi = ax3.get_ylim()
    # NOTE(review): ticks start at 0 here rather than the axis minimum —
    # confirm this asymmetry with the other panels is intended.
    ax3.yaxis.set_ticks(np.arange(0, hi, (hi - lo) / 5))

    # Panel 4: cash balance.
    ax4 = plt.subplot(414, sharex=ax1)
    perf.cash.plot(ax=ax4)
    ax4.set_ylabel('Cash\n({})'.format(quote))
    lo, hi = ax4.get_ylim()
    ax4.yaxis.set_ticks(np.arange(0, hi, hi / 5))

    plt.show()
Esempio n. 25
0
# Print headline risk/return statistics for the fund vs. the benchmark.
print('VaR:', empyrical.value_at_risk(return_df) * 100, '%')
print('VaR Benchmark:', empyrical.value_at_risk(bench_df) * 100, '%')

print('')

# Up/down capture of the fund against the benchmark.
print('Capture:',
      round(empyrical.capture(return_df, bench_df, period='daily')[0] * 100),
      '%')
print('')

print('Max drawdown: ', round(empyrical.max_drawdown(return_df)[0] * 100), '%')
print('Max drawdown Benchmark: ',
      round(empyrical.max_drawdown(bench_df)[0] * 100), '%')

print('')
# CAPM regression against the benchmark; only beta is reported here
# (alpha is computed but unused in this report).
alpha, beta = empyrical.alpha_beta(return_df, bench_df, risk_free=rf)
print('Beta: ', beta)
print('')
print('Annual return:', round(empyrical.annual_return(return_df)[0] * 100),
      '%')
print('Annual Vol:', round(empyrical.annual_volatility(return_df)[0] * 100),
      '%')
print('')
print('Annual return Benchmark:',
      round(empyrical.annual_return(bench_df)[0] * 100), '%')
print('Annual Vol Benchmark:',
      round(empyrical.annual_volatility(bench_df)[0] * 100), '%')
print('')
def calc_stats(df):
Esempio n. 26
0
    strategyBase.run()

    # Collect outputs from the test strategy and the baseline strategy.
    return_test = retTest.getReturns()
    return_test_arr = toArray(return_test)
    return_base = retBase.getReturns()
    return_base_arr = toArray(return_base)
    result_test = strategyTest.getResult()
    result_base = strategyBase.getResult()
    cum_return_test = retTest.getCumulativeReturns()
    cum_return_test_arr = toArray(cum_return_test)
    cum_return_base = retBase.getCumulativeReturns()
    cum_return_base_arr = toArray(cum_return_base)
    # Sharpe ratios; 0.036 is presumably the annual risk-free rate — TODO confirm.
    srTest = sharpeTest.getSharpeRatio(0.036)
    srBase = sharpeBase.getSharpeRatio(0.036)
    # Alpha/beta of the test strategy against the baseline returns.
    alpha, beta = ep.alpha_beta(np.array(return_test_arr),
                                np.array(return_base_arr), 0.036)

    print("test分期收益率")
    output(return_test_arr)
    print("base分期收益率")
    output(return_base_arr)
    print("test策略期末收益")
    print(result_test)
    print("base策略期末收益")
    print(result_base)
    print("test累积收益率")
    output(cum_return_test_arr)
    print("base累积收益率")
    output(cum_return_base_arr)
    print("test夏普比率")
    print(srTest)
Esempio n. 27
0
def empyrial(my_portfolio, rf=0.0, sigma_value=1, confidence_value=0.95):
    """Build, display and cache a full performance report for *my_portfolio*.

    Computes the portfolio return series (honouring a rebalance schedule and
    any risk-manager rule), compares it against the portfolio's benchmark,
    prints a metrics table, stores each metric as an attribute on the
    ``empyrial`` function object, and returns a tuple of quantstats plots.

    Parameters
    ----------
    my_portfolio : portfolio object exposing assets/weights/dates/benchmark
        and optionally ``rebalance`` and ``risk_manager`` attributes.
    rf : float
        Risk-free rate used for Sharpe and alpha/beta.
    sigma_value, confidence_value :
        Parameters forwarded to the value-at-risk calculation.
    """
    try:
        # we want to get the dataframe with the dates and weights
        rebalance_schedule = my_portfolio.rebalance

        # Truncate column labels to bare YYYY-MM-DD dates.
        columns = []
        for date in rebalance_schedule.columns:
            date = date[0:10]
            columns.append(date)
        rebalance_schedule.columns = columns

        # then want to make a list of the dates and start with our first date
        dates = [my_portfolio.start_date]

        # then our rebalancing dates into that list
        dates = dates + rebalance_schedule.columns.to_list()

        datess = []
        for date in dates:
            date = date[0:10]
            datess.append(date)
        dates = datess
        # this will hold returns
        # NOTE(review): Series.append is deprecated in recent pandas —
        # consider collecting pieces and using pd.concat instead.
        returns = pd.Series()

        # then we want to be able to call the dates like tuples
        for i in range(len(dates) - 1):
            # get our weights
            weights = rebalance_schedule[str(dates[i + 1])]

            # then we want to get the returns

            add_returns = get_returns(
                my_portfolio.portfolio,
                weights,
                start_date=dates[i],
                end_date=dates[i + 1],
            )

            # then append those returns
            returns = returns.append(add_returns)

    except AttributeError:
        # No rebalance schedule: compute returns with static weights.
        try:
            returns = get_returns_from_data(my_portfolio.data,
                                            my_portfolio.weights)
        except AttributeError:
            returns = get_returns(
                my_portfolio.portfolio,
                my_portfolio.weights,
                start_date=my_portfolio.start_date,
                end_date=my_portfolio.end_date,
            )

    # Cumulative growth factor series (1 = break-even).
    creturns = (returns + 1).cumprod()

    # risk manager
    # Each rule truncates the return series at the first date the rule fires.
    try:
        if list(my_portfolio.risk_manager.keys())[0] == "Stop Loss":

            values = []
            for r in creturns:
                if r <= 1 + my_portfolio.risk_manager["Stop Loss"]:
                    values.append(r)
                else:
                    pass

            try:
                date = creturns[creturns == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]

            except Exception as e:
                pass

        if list(my_portfolio.risk_manager.keys())[0] == "Take Profit":

            values = []
            for r in creturns:
                if r >= 1 + my_portfolio.risk_manager["Take Profit"]:
                    values.append(r)
                else:
                    pass

            try:
                date = creturns[creturns == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]

            except Exception as e:
                pass

        if list(my_portfolio.risk_manager.keys())[0] == "Max Drawdown":

            drawdown = qs.stats.to_drawdown_series(returns)

            values = []
            for r in drawdown:
                if r <= my_portfolio.risk_manager["Max Drawdown"]:
                    values.append(r)
                else:
                    pass

            try:
                date = drawdown[drawdown == values[0]].index[0]
                date = str(date.to_pydatetime())
                my_portfolio.end_date = date[0:10]
                returns = returns[:my_portfolio.end_date]

            except Exception as e:
                pass

    except Exception as e:
        # Best-effort: a missing/odd risk_manager leaves returns untouched.
        pass

    print("Start date: " + str(my_portfolio.start_date))
    print("End date: " + str(my_portfolio.end_date))

    # Benchmark return series over the (possibly truncated) window.
    benchmark = get_returns(
        my_portfolio.benchmark,
        wts=[1],
        start_date=my_portfolio.start_date,
        end_date=my_portfolio.end_date,
    )

    # --- Metric computation: each metric is formatted as a display string. ---
    CAGR = cagr(returns, period='daily', annualization=None)
    # CAGR = round(CAGR, 2)
    # CAGR = CAGR.tolist()
    CAGR = str(round(CAGR * 100, 2)) + "%"

    CUM = cum_returns(returns, starting_value=0, out=None) * 100
    CUM = CUM.iloc[-1]
    CUM = CUM.tolist()
    CUM = str(round(CUM, 2)) + "%"

    VOL = qs.stats.volatility(returns, annualize=True)
    VOL = VOL.tolist()
    VOL = str(round(VOL * 100, 2)) + " %"

    SR = qs.stats.sharpe(returns, rf=rf)
    SR = np.round(SR, decimals=2)
    SR = str(SR)

    empyrial.SR = SR

    CR = qs.stats.calmar(returns)
    CR = CR.tolist()
    CR = str(round(CR, 2))

    empyrial.CR = CR

    STABILITY = stability_of_timeseries(returns)
    STABILITY = round(STABILITY, 2)
    STABILITY = str(STABILITY)

    MD = max_drawdown(returns, out=None)
    MD = str(round(MD * 100, 2)) + " %"
    """OR = omega_ratio(returns, risk_free=0.0, required_return=0.0)
    OR = round(OR,2)
    OR = str(OR)
    print(OR)"""

    SOR = sortino_ratio(returns, required_return=0, period='daily')
    SOR = round(SOR, 2)
    SOR = str(SOR)

    SK = qs.stats.skew(returns)
    SK = round(SK, 2)
    SK = SK.tolist()
    SK = str(SK)

    KU = qs.stats.kurtosis(returns)
    KU = round(KU, 2)
    KU = KU.tolist()
    KU = str(KU)

    TA = tail_ratio(returns)
    TA = round(TA, 2)
    TA = str(TA)

    CSR = qs.stats.common_sense_ratio(returns)
    CSR = round(CSR, 2)
    CSR = CSR.tolist()
    CSR = str(CSR)

    VAR = qs.stats.value_at_risk(returns,
                                 sigma=sigma_value,
                                 confidence=confidence_value)
    VAR = np.round(VAR, decimals=2)
    VAR = str(VAR * 100) + " %"

    # Alpha/beta of portfolio returns against the benchmark.
    alpha, beta = alpha_beta(returns, benchmark, risk_free=rf)
    AL = round(alpha, 2)
    BTA = round(beta, 2)

    def condition(x):
        # True for a winning (positive-return) day.
        return x > 0

    win = sum(condition(x) for x in returns)
    total = len(returns)
    win_ratio = win / total
    win_ratio = win_ratio * 100
    win_ratio = round(win_ratio, 2)

    IR = calculate_information_ratio(returns, benchmark.iloc[:, 0])
    IR = round(IR, 2)

    # --- Assemble and display the metrics table. ---
    data = {
        "": [
            "Annual return",
            "Cumulative return",
            "Annual volatility",
            "Winning day ratio",
            "Sharpe ratio",
            "Calmar ratio",
            "Information ratio",
            "Stability",
            "Max Drawdown",
            "Sortino ratio",
            "Skew",
            "Kurtosis",
            "Tail Ratio",
            "Common sense ratio",
            "Daily value at risk",
            "Alpha",
            "Beta",
        ],
        "Backtest": [
            CAGR,
            CUM,
            VOL,
            f"{win_ratio}%",
            SR,
            CR,
            IR,
            STABILITY,
            MD,
            SOR,
            SK,
            KU,
            TA,
            CSR,
            VAR,
            AL,
            BTA,
        ],
    }

    # Create DataFrame
    df = pd.DataFrame(data)
    df.set_index("", inplace=True)
    df.style.set_properties(**{
        "background-color": "white",
        "color": "black",
        "border-color": "black"
    })
    display(df)

    empyrial.df = data

    # Bar plot of daily returns (blue = gain, grey = loss).
    y = []
    for x in returns:
        y.append(x)

    arr = np.array(y)
    # arr
    # returns.index
    my_color = np.where(arr >= 0, "blue", "grey")
    plt.figure(figsize=(30, 8))
    plt.vlines(x=returns.index, ymin=0, ymax=arr, color=my_color, alpha=0.4)
    plt.title("Returns")

    # Cache everything on the function object for later inspection.
    empyrial.returns = returns
    empyrial.creturns = creturns
    empyrial.benchmark = benchmark
    empyrial.CAGR = CAGR
    empyrial.CUM = CUM
    empyrial.VOL = VOL
    empyrial.SR = SR
    empyrial.win_ratio = win_ratio
    empyrial.CR = CR
    empyrial.IR = IR
    empyrial.STABILITY = STABILITY
    empyrial.MD = MD
    empyrial.SOR = SOR
    empyrial.SK = SK
    empyrial.KU = KU
    empyrial.TA = TA
    empyrial.CSR = CSR
    empyrial.VAR = VAR
    empyrial.AL = AL
    empyrial.BTA = BTA

    try:
        empyrial.orderbook = make_rebalance.output
    except Exception as e:
        OrderBook = pd.DataFrame({
            "Assets": my_portfolio.portfolio,
            "Allocation": my_portfolio.weights,
        })

        empyrial.orderbook = OrderBook.T

    # Drop zero-weight assets before plotting the allocation.
    # NOTE(review): this mutates my_portfolio.portfolio in place.
    wts = copy.deepcopy(my_portfolio.weights)
    indices = [i for i, x in enumerate(wts) if x == 0.0]

    while 0.0 in wts:
        wts.remove(0.0)

    for i in sorted(indices, reverse=True):
        del my_portfolio.portfolio[i]

    return (
        qs.plots.returns(returns, benchmark, cumulative=True),
        qs.plots.yearly_returns(returns, benchmark),
        qs.plots.monthly_heatmap(returns),
        qs.plots.drawdown(returns),
        qs.plots.drawdowns_periods(returns),
        qs.plots.rolling_volatility(returns),
        qs.plots.rolling_sharpe(returns),
        qs.plots.rolling_beta(returns, benchmark),
        graph_opt(my_portfolio.portfolio, wts, pie_size=7, font_size=14),
    )
Esempio n. 28
0
 def _alpha_beta(self):
     """Compute alpha and beta of the cached returns against the benchmark
     returns (non-annualized) and store them on the instance."""
     result = ey.alpha_beta(
         returns=self.__returns,
         factor_returns=self.__benchReturns,
         risk_free=self.__risk_free,
         annualization=1,
     )
     self.__alpha, self.__beta = result
Esempio n. 29
0
 def alpha_beta(self):
     """Return ``(alpha, beta)`` of the strategy returns vs. the market."""
     strategy_arr = numpy.array(self.returns)
     market_arr = numpy.array(self.market)
     a, b = alpha_beta(strategy_arr, market_arr)
     return a, b
Esempio n. 30
0
                   left_index=True,
                   right_index=True,
                   how="inner")
    print(Ret)
    # Risk-free rate: convert 3.6% annual to a daily rate (360-day convention).
    rf = 1.036**(1 / 360) - 1.0
    print(rf)
    # Excess stock returns and the market risk premium.
    Eret = Ret - rf
    print(Eret)
    # Fit the CAPM regression by OLS.
    model = sm.OLS(np_test_return, sm.add_constant(np_base_return))
    result = model.fit()
    print(result.summary())
    print("empyrical")
    # Cross-check alpha/beta with empyrical (risk-free = 0.036 annual).
    alpha, beta = ep.alpha_beta(np_test_return, np_base_return, 0.036)
    print(alpha, beta)
    # Alternative approach: simple linear regression on the first 10 points.
    x = []
    y = []
    print(test_return)
    for i in range(10):
        x.append(base_return[i])
        y.append(test_return[i])
    b, a, r_value, p_value, std_err = stats.linregress(x, y)
    print(a, b)

    # Sharpe ratio (annualized with sqrt(252), 3% risk-free).
    sharpe = (np_test_return.mean() -
              0.03) / np_test_return.std() * np.sqrt(252)
    print(sharpe)
Esempio n. 31
0
def _rollingWindowMetrics(metrics, returnStream, factorReturn, rollingPeriod):
    """Populate short-horizon (e.g. 90/45 day) rolling sharpe/profitability
    metrics in ``metrics`` in place; keys are suffixed with the window size.

    Only fills the metrics when more than 50 rolling observations exist,
    matching the guard used for the 252-day window.
    """
    label = str(rollingPeriod)
    colName = label + " Day Rolling Sharpe"
    rollingSharpe = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x: empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe.columns = [colName]

    if len(rollingSharpe[colName].values) > 50:
        metrics["25TH PERCENTILE SHARPE " + label] = np.percentile(rollingSharpe[colName].values, 25)
        metrics["MIN ROLLING SHARPE " + label] = np.percentile(rollingSharpe[colName].values, 1)
        metrics["ROLLING SHARPE ERROR " + label] = rollingSharpe[colName].std()
        metrics["SHARPE TO MIN RATIO " + label] = metrics["SHARPE"] / abs(metrics["MIN ROLLING SHARPE " + label])

        # Fraction of up days within each rolling window.
        profitability = lambda x: len((x)[x > 0]) / len(x)
        rollingProfit = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(profitability).dropna().values
        metrics["MIN PROFITABILITY " + label] = np.percentile(rollingProfit, 1)
        metrics["PROFITABILITY DROP " + label] = metrics["PROFITABILITY"] - metrics["MIN PROFITABILITY " + label]
        metrics["25TH PROFITABILITY " + label] = np.percentile(rollingProfit, 25)

        metrics["MIN FACTOR PROFITABILITY " + label] = np.percentile(factorReturn.rolling(rollingPeriod, min_periods=rollingPeriod).apply(profitability).dropna().values, 1)
        metrics["MIN PROFITABILITY DIFFERENCE " + label] = metrics["MIN PROFITABILITY " + label] - metrics["MIN FACTOR PROFITABILITY " + label]


def vizResults(slippageAdjustedReturn, returnStream, factorReturn, plotting=False):
    """Build a dictionary of performance metrics for an algo return stream
    versus a factor (benchmark) return stream; optionally plot diagnostics.

    Parameters
    ----------
    slippageAdjustedReturn : pandas.DataFrame
        Algo returns after slippage costs (single-column -- inferred from the
        ``.columns`` usage below; TODO confirm with caller).
    returnStream : pandas.DataFrame
        Raw algo returns (single-column).
    factorReturn : pandas.DataFrame
        Benchmark/factor returns (single-column).
    plotting : bool, optional
        When True, renders rolling sharpe/drawdown and cumulative-return
        charts with matplotlib.

    Returns
    -------
    dict
        Metric name -> value. Rolling-window metrics are only populated when
        more than 50 rolling observations are available.
    """
    # ENSURE EQUAL LENGTH: a factor series starting before the algo's first
    # observation can create very skewed results, so trim to the algo's start.
    factorReturn = factorReturn[returnStream.index[0]:]

    # Hoist the sharpe ratios reused many times below.
    factorSharpe = empyrical.sharpe_ratio(factorReturn)
    algoSharpe = empyrical.sharpe_ratio(returnStream)
    slippageSharpe = empyrical.sharpe_ratio(slippageAdjustedReturn)

    # CALCULATE SHARPE WITH SLIPPAGE. The trailing sign factor keeps the
    # "relative" measure oriented correctly when the factor sharpe is negative.
    sharpeDiffSlippage = slippageSharpe - factorSharpe
    relativeSharpeSlippage = sharpeDiffSlippage / factorSharpe * (factorSharpe / abs(factorSharpe))

    alpha, beta = empyrical.alpha_beta(returnStream, factorReturn)
    alphaSlippage, betaSlippage = empyrical.alpha_beta(slippageAdjustedReturn, factorReturn)
    metrics = {"SHARPE": algoSharpe,
               "SHARPE SLIPPAGE": slippageSharpe,
               "STABILITY": empyrical.stability_of_timeseries(returnStream),
               "ALPHA": alpha,
               "ALPHA SLIPPAGE": alphaSlippage,
               "BETA": abs(beta),
               "ANNUALIZED RETURN": empyrical.annual_return(returnStream)[0],
               # Fraction of days with any position/return at all.
               "ACTIVITY": np.count_nonzero(returnStream) / float(len(returnStream)),
               # Treynor: excess annualized return per unit of factor beta.
               "TREYNOR": ((empyrical.annual_return(returnStream.values)[0] - empyrical.annual_return(factorReturn.values)[0])
                           / abs(empyrical.beta(returnStream, factorReturn))),
               # Beta of the *sign* of returns vs. the sign of factor returns.
               "RAW BETA": abs(empyrical.alpha_beta(returnStream.apply(lambda x: applyBinary(x), axis=0),
                                                    factorReturn.apply(lambda x: applyBinary(x), axis=0))[1]),
               "SHARPE DIFFERENCE": algoSharpe - factorSharpe,
               "RELATIVE SHARPE": (algoSharpe - factorSharpe) / factorSharpe * (factorSharpe / abs(factorSharpe)),
               "FACTOR SHARPE": factorSharpe,
               "SHARPE DIFFERENCE SLIPPAGE": sharpeDiffSlippage,
               "RELATIVE SHARPE SLIPPAGE": relativeSharpeSlippage,
               }

    metrics["FACTOR PROFITABILITY"] = len((factorReturn.values)[factorReturn.values > 0]) / len(factorReturn.values)
    metrics["PROFITABILITY"] = len((returnStream.values)[returnStream.values > 0]) / len(returnStream.values)

    metrics["PROFITABILITY DIFFERENCE"] = metrics["PROFITABILITY"] - metrics["FACTOR PROFITABILITY"]

    metrics["PROFITABILITY SLIPPAGE"] = len((slippageAdjustedReturn.values)[slippageAdjustedReturn.values > 0]) / len(slippageAdjustedReturn.values)

    # Profitability measured over active (non-zero) days only.
    metrics["ACTIVE PROFITABILITY"] = len((returnStream.values)[returnStream.values > 0]) / len((returnStream.values)[returnStream.values != 0])
    metrics["ACTIVE PROFITABILITY SLIPPAGE"] = len((slippageAdjustedReturn.values)[slippageAdjustedReturn.values > 0]) / len((slippageAdjustedReturn.values)[slippageAdjustedReturn.values != 0])

    metrics["TOTAL DAYS SEEN"] = len(returnStream)
    metrics["SHARPE SLIPPAGE DECAY"] = metrics["SHARPE DIFFERENCE SLIPPAGE"] - metrics["SHARPE DIFFERENCE"]
    # MEASURES BINARY STABILITY OF PREDICTIONS: stability of the sign series,
    # negated when the 600-day window trended down overall.
    metrics["EXTREME STABILITY ROLLING 600"] = (returnStream.rolling(600, min_periods=600).apply(lambda x: empyrical.stability_of_timeseries(applyBinarySkipZero(x)) * (-1 if x[-1] - x[0] < 0 else 1)).dropna()).min().values[0]
    metrics["EXTREME STABILITY"] = empyrical.stability_of_timeseries(applyBinarySkipZero(returnStream.values))

    # ---- 252-day rolling diagnostics (algo vs. factor) ----
    rollingPeriod = 252

    rollingSharpe = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x: empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe.columns = ["252 Day Rolling Sharpe"]
    rollingSharpeFactor = factorReturn.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x: empyrical.sharpe_ratio(x)).dropna()
    rollingSharpe = rollingSharpe.join(rollingSharpeFactor)
    rollingSharpe.columns = ["252 Day Rolling Sharpe Algo", "252 Day Rolling Sharpe Factor"]

    if len(rollingSharpe["252 Day Rolling Sharpe Algo"].values) > 50:

        diffSharpe = pd.DataFrame(rollingSharpe.apply(lambda x: x[0] - x[1], axis=1), columns=["Sharpe Difference"])
        metrics["SHARPE DIFFERENCE MIN"] = np.percentile(diffSharpe["Sharpe Difference"].values, 1)
        metrics["SHARPE DIFFERENCE AVERAGE"] = np.percentile(diffSharpe["Sharpe Difference"].values, 50)
        difVals = diffSharpe["Sharpe Difference"].values
        metrics["SHARPE DIFFERENCE GREATER THAN 0"] = len(difVals[np.where(difVals > 0)]) / float(len(difVals))
        metrics["25TH PERCENTILE SHARPE DIFFERENCE"] = np.percentile(diffSharpe["Sharpe Difference"].values, 25)

        relDiffSharpe = pd.DataFrame(rollingSharpe.apply(lambda x: (x[0] - x[1]) / x[1] * (x[1] / abs(x[1])), axis=1), columns=["Sharpe Difference"])
        metrics["RELATIVE SHARPE DIFFERENCE MIN"] = np.percentile(relDiffSharpe["Sharpe Difference"].values, 1)
        metrics["RELATIVE SHARPE DIFFERENCE AVERAGE"] = np.percentile(relDiffSharpe["Sharpe Difference"].values, 50)
        relDifVals = relDiffSharpe["Sharpe Difference"].values
        metrics["RELATIVE SHARPE DIFFERENCE GREATER THAN 0"] = len(relDifVals[np.where(relDifVals > 0)]) / float(len(relDifVals))
        metrics["25TH PERCENTILE RELATIVE SHARPE DIFFERENCE"] = np.percentile(relDiffSharpe["Sharpe Difference"].values, 25)

        metrics["ROLLING SHARPE BETA"] = abs(empyrical.beta(rollingSharpe["252 Day Rolling Sharpe Algo"], rollingSharpe["252 Day Rolling Sharpe Factor"]))
        metrics["25TH PERCENTILE SHARPE"] = np.percentile(rollingSharpe["252 Day Rolling Sharpe Algo"].values, 25)
        metrics["MIN ROLLING SHARPE"] = np.percentile(rollingSharpe["252 Day Rolling Sharpe Algo"].values, 1)

        rollingDownside = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x: empyrical.max_drawdown(x)).dropna()
        rollingDownside.columns = ["252 Day Rolling Downside"]
        rollingDownsideFactor = factorReturn.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x: empyrical.max_drawdown(x)).dropna()
        rollingDownside = rollingDownside.join(rollingDownsideFactor)
        rollingDownside.columns = ["252 Day Rolling Downside Algo", "252 Day Rolling Downside Factor"]

        # |r| of rolling sharpe regressed on time: low values = unstable edge.
        metrics["ROLLING SHARPE STABILITY"] = abs(stats.linregress(np.arange(len(rollingSharpe["252 Day Rolling Sharpe Algo"].values)),
                                                                   rollingSharpe["252 Day Rolling Sharpe Algo"].values).rvalue)

        rollingReturn = returnStream.rolling(rollingPeriod, min_periods=rollingPeriod).apply(lambda x: empyrical.cum_returns(x)[-1]).dropna()
        rollingReturn.columns = ["ROLLING RETURN"]
        # BUG FIX: the original annualized a single observation
        # (annual_return(factorReturn.values[0])); annualize the full series
        # and take column 0, matching the TREYNOR computation above.
        metrics["SMART INFORMATION RATIO"] = (np.percentile(rollingReturn["ROLLING RETURN"].values, 25) - empyrical.annual_return(factorReturn.values)[0]) \
            / returnStream.values.std()

        metrics["ROLLING SHARPE ERROR"] = rollingSharpe["252 Day Rolling Sharpe Algo"].std()
        metrics["ONE STD SHARPE"] = slippageSharpe - metrics["ROLLING SHARPE ERROR"]
        if plotting == True:
            import matplotlib.pyplot as plt
            rollingSharpe.plot()
            rollingDownside.plot()

    # ---- 90-day and 45-day rolling diagnostics (same structure; the original
    # duplicated this code verbatim for each window size) ----
    _rollingWindowMetrics(metrics, returnStream, factorReturn, 90)
    _rollingWindowMetrics(metrics, returnStream, factorReturn, 45)

    # Cumulative return curves, joined for side-by-side plotting.
    returns = returnStream.apply(lambda x: empyrical.cum_returns(x))
    returns.columns = ["algo"]
    factorReturn = factorReturn.apply(lambda x: empyrical.cum_returns(x))
    returns = returns.join(factorReturn)
    returns.columns = ["Algo Return", "Factor Return"]

    # FORCE SHOW
    if plotting == True:
        import matplotlib.pyplot as plt
        returns.plot()
        plt.show()
    return metrics