def backtest(agent, env, market):
    global PATH_prefix
    print("starting to backtest......")
    from agents.UCRP import UCRP
    from agents.Winner import WINNER
    from agents.Losser import LOSSER

    agents = []
    agents.extend(agent)
    agents.append(WINNER())
    agents.append(UCRP())
    agents.append(LOSSER())
    labels = ['PG', 'Winner', 'UCRP', 'Losser']

    wealths_result = []
    rs_result = []
    for i, agent in enumerate(agents):
        stocktrader = StockTrader()
        info = env.step(None, None, 'False')
        r, contin, s, w1, p, risk = parse_info(info)
        contin = 1
        wealth = 10000
        wealths = [wealth]
        rs = [1]
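        # Roll the agent forward through the test period: at each step the
        # agent proposes new weights w2, the environment's reward r is treated
        # as a log return, and wealth compounds by exp(r).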
        while contin:
            w2 = agent.predict(s, w1)
            env_info = env.step(w1, w2, 'False')
            r, contin, s_next, w1, p, risk = parse_info(env_info)
            wealth = wealth * math.exp(r)
            rs.append(math.exp(r) - 1)
            wealths.append(wealth)
            s = s_next
            stocktrader.update_summary(0, r, 0, 0, w2, p)

        stocktrader.write(map(lambda x: str(x), env.get_codes()), labels[i])
        print('finish one agent')
        wealths_result.append(wealths)
        rs_result.append(rs)

    print('Asset Name', '   ', 'Average daily return', '   ',
          'Sharpe ratio', '   ', 'Maximum drawdown')
    plt.figure(figsize=(8, 6), dpi=100)
    for i in range(len(agents)):
        plt.plot(wealths_result[i], label=labels[i])
        plt.title('RL - PM in the {} stock market'.format(market))
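        # Metrics per strategy: mean daily return (in %), Sharpe ratio
        # annualised with sqrt(252) trading days, and drawdown of the global
        # wealth trough relative to the running peak.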
        mrr = float(np.mean(rs_result[i]) * 100)
        sharpe = float(
            np.mean(rs_result[i]) / np.std(rs_result[i]) * np.sqrt(252))
        maxdrawdown = float(
            max(1 - min(wealths_result[i]) /
                np.maximum.accumulate(wealths_result[i])))
        print(labels[i], '   ', round(mrr, 3), '%', '   ', round(sharpe, 3),
              '  ', round(maxdrawdown, 3))
    plt.legend()
    plt.savefig(PATH_prefix + 'backtest.png')
    plt.show()
def backtest(agent, env, path, framework):
    logger.debug("Backtest")

    agents = []
    agents.extend(agent)
    agents.append(UCRP())
    agents.append(Loser())
    agents.append(Winner())
    labels = [framework, 'UCRP', "Loser", "Winner"]

    wealths_result = []
    rs_result = []
    for i, agent in enumerate(agents):
        stocktrader = StockTrader()
        info = env.step(None, None, False)
        r, done, s, w1, p, risk = parse_info(info)
        done = 1
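        # Note: despite its name, `done` is used as a continue flag; it is set
        # to 1 here to enter the loop and becomes falsy when the episode ends.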
        wealth = 10000000
        wealths = [wealth]
        rs = [1]
        while done:
            w2 = agent.predict(s, w1)
            env_info = env.step(w1, w2, False)
            r, done, s_next, w1, p, risk = parse_info(env_info)
            wealth = wealth * math.exp(r)
            rs.append(math.exp(r) - 1)
            wealths.append(wealth)
            s = s_next
            stocktrader.update_summary(0, r, 0, 0, w2, p)

        stocktrader.write(map(lambda x: str(x), env.get_codes()), labels[i])
        logger.debug('Finished agent {}'.format(i))
        wealths_result.append(wealths)
        rs_result.append(rs)

    logger.info('Asset name \t Average daily return \t Sharpe ratio \t Max drawdown')
    plt.figure(figsize=(8, 6), dpi=100)
    for i in range(len(agents)):
        plt.plot(wealths_result[i], label=labels[i])
        mrr = float(np.mean(rs_result[i]) * 100)
        sharpe = float(
            np.mean(rs_result[i]) / np.std(rs_result[i]) * np.sqrt(252))
        maxdrawdown = float(
            max(1 - min(wealths_result[i]) /
                np.maximum.accumulate(wealths_result[i])))
        logger.info("%s \t %s \t %s \t %s", labels[i], round(mrr, 3),
                    round(sharpe, 3), round(maxdrawdown, 3))
    plt.legend()
    plt.savefig(path + 'backtest.png')
def backtest(agent, env):
    print("starting to backtest......")
    from agents.UCRP import UCRP
    from agents.Winner import WINNER
    from agents.Losser import LOSSER

    agents = []
    agents.append(agent)
    # agents.append(WINNER())
    agents.append(UCRP())
    # agents.append(LOSSER())
    # labels = ['PG', 'Winner', 'UCRP', 'Losser']
    labels = ['PG', 'UCRP']

    wealths_result = []
    rs_result = []
    for i, agent in enumerate(agents):
        info = env.step(None, None)
        r, contin, s, w1, p, risk = parse_info(info)
        contin = 1
        wealth = 10000
        wealths = [wealth]
        rs = [1]
        while contin:
            w2 = agent.predict(s, w1)
            if i == 0:
                print(w2)
            env_info = env.step(w1, w2)
            r, contin, s_next, w1, p, risk = parse_info(env_info)
            wealth = wealth * math.exp(r)
            rs.append(math.exp(r) - 1)
            wealths.append(wealth)
            s = s_next
        print('finish one agent')
        wealths_result.append(wealths)
        rs_result.append(rs)

    for i in range(len(agents)):
        plt.plot(wealths_result[i], label=labels[i])
        print(labels[i], '   ', np.mean(rs_result[i]), '   ', np.std(rs_result[i]))
    plt.legend()
    plt.show()
def backtest(pg, env, codes, mode, market, predictor):
    global PATH_prefix

    agents = []
    agents.append(pg)
    ucrp = UCRP()
    agents.append(ucrp)
    mpt = MPT()
    agents.append(mpt)
    strategy_labels = [predictor, 'UCRP', 'MinVar']
    all_growth = pd.DataFrame()
    final_info = pd.DataFrame(index=strategy_labels)
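    # `final_info` accumulates the per-strategy summary metrics written out below.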
    for i, strategy in enumerate(agents):
        G_t, y_t, price_t, terminal = env.reset()
        total_w1 = []
        w1_t = torch.zeros(G_t.shape[0])
        w1_t[0] = 1
        total_w1.append(w1_t)
        G_t = torch.Tensor(G_t)
        total_reward = torch.tensor(0)
        daily_returns = []
        cumulative_growth = []
        t = 0
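        # Step through the evaluation window: each strategy proposes weights
        # w2_t, a proportional transaction cost mu is charged on the turnover,
        # and the negative log portfolio growth is accumulated as the reward.
        # `terminal` stays truthy until the environment signals the last step.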
        while terminal:
            if strategy_labels[i] == 'MinVar':
                window_returns = price_t[1:, 1:] / price_t[1:, :-1]
                w2_t = strategy.forward(window_returns)
                w2_t = torch.Tensor(w2_t)
            else:
                w2_t = strategy.forward(G_t.reshape(-1), w1_t)

            mu = cost * (torch.abs(w2_t[1:] - w1_t[1:])).sum()
            G_t2, y_t2, price_t2, terminal = env.step()
            y_t2 = torch.Tensor(y_t2)
            reward = torch.dot(w2_t, y_t2) * (1 - mu)
            reward = -torch.log(reward)
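            # Portfolio weights drift with the relative price changes y_t2
            # before the next rebalancing decision.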
            w1_t2 = (w2_t * y_t2) / torch.dot(w2_t, y_t2)
            total_reward = total_reward + reward
            daily_returns.append(torch.exp(-reward) - 1)
            cumulative_growth.append(torch.exp(-total_reward))
            w1_t = w1_t2
            G_t = torch.Tensor(G_t2)
            price_t = price_t2
            total_w1.append(w1_t)
            t = t + 1
        SR, exp_return, std_dev = sharpe_ratio(torch.stack(daily_returns))
        mdd = max_drawdown(torch.stack(cumulative_growth))
        print("Calculating Sharpe Ratio with average daily return of:",
              exp_return.item(), ' %', 'and std of:', std_dev.item(), ' %')
        print('Sharpe ratio for ' + strategy_labels[i] + ':', SR)
        print('MDD for ' + strategy_labels[i] + ':', mdd)
        print('Total steps per epoch:', t)
        print('Final Test Growth for ' + strategy_labels[i] + ':',
              cumulative_growth[-1])
        print('Final weight allocation for ' + strategy_labels[i] + ':', w1_t)
        consolidate_info(final_info, strategy_labels[i], SR.item(),
                         exp_return.item(), std_dev.item(), mdd.item(),
                         cumulative_growth[-1].item())
        np.savetxt(r'' + PATH_prefix + strategy_labels[i] + '_weights_' + mode,
                   torch.stack(total_w1).data.numpy(),
                   delimiter=",")
        all_growth[strategy_labels[i]] = torch.stack(
            cumulative_growth).data.numpy()
        plt.plot(cumulative_growth, label=strategy_labels[i])

    all_growth.to_csv(r'' + PATH_prefix + 'all_growth_' + mode + '_rc_' +
                      str(rc_factor) + '_' + predictor + '_' + market +
                      '_hs_' + str(pg.n_hidden) + '_back_test.csv')
    final_info.to_csv(r'' + PATH_prefix + 'final_info_' + mode + '_rc_' +
                      str(rc_factor) + '_' + predictor + '_' + market +
                      '_hs_' + str(pg.n_hidden) + '_back_test.csv')
    plt.legend()
    plt.savefig(r'' + PATH_prefix + mode + '_rc_' + str(rc_factor) + '_' +
                predictor + '_' + market + '_hs_' + str(pg.n_hidden) +
                '_back_test.png')
    plt.show()
def backtest(agent, env):
    global PATH_prefix
    print("starting to backtest......")
    from agents.UCRP import UCRP
    from agents.Winner import WINNER
    from agents.Losser import LOSSER

    agents = []
    agents.extend(agent)
    agents.append(WINNER())
    agents.append(UCRP())
    agents.append(LOSSER())
    labels = ['PG', 'Winner', 'UCRP', 'Losser']
    wealths_result = []
    rs_result = []
    for i, agent in enumerate(agents):
        stocktrader = StockTrader()
        info = env.step(None, None, 'False')
        r, contin, s, w1, p, risk = parse_info(info)
        contin = 1
        wealth = 10000
        wealths = [wealth]
        rs = [1]
        while contin:
            w2 = agent.predict(s, w1)
            env_info = env.step(w1, w2, 'False')
            r, contin, s_next, w1, p, risk = parse_info(env_info)
            wealth = wealth * math.exp(r)
            rs.append(math.exp(r) - 1)
            wealths.append(wealth)
            s = s_next
            stocktrader.update_summary(0, r, 0, 0, w2, p)

        stocktrader.write(map(lambda x: str(x), env.get_codes()), labels[i])
        print('finish one agent')
        wealths_result.append(wealths)
        rs_result.append(rs)

    print('Asset Name', '   ', 'Average daily return', '   ', 'Sharpe ratio', '   ', 'Maximum drawdown')
    plt.figure(figsize=(8, 6), dpi=100)
    for i in range(len(agents)):
        # if labels[i] == 'UCRP' or labels[i] == 'Losser':
        #     continue
        plt.plot(wealths_result[i], label=labels[i])
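        # Dump this strategy's wealth curve to a comma-separated text file.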
        with open(labels[i] + '.txt', 'w') as fileObject:
            for ip in wealths_result[i]:
                fileObject.write(str(ip))
                fileObject.write(', ')
        mrr = float(np.mean(rs_result[i]) * 100)
        sharpe = float(
            np.mean(rs_result[i]) / np.std(rs_result[i]) * np.sqrt(252))
        maxdrawdown = float(
            max(1 - min(wealths_result[i]) /
                np.maximum.accumulate(wealths_result[i])))
        print(labels[i], '   ', round(mrr, 3), '%', '   ', round(sharpe, 3),
              '  ', round(maxdrawdown, 3))
    # new_data = []
    # for i in LSTM:
    #     for j in range(8):
    #         new_data.append(i)
    # inter_op = interpolate.interp1d(range(len(hand_data)), hand_data, kind='linear')
    # hand_data = inter_op(7*range(len(hand_data)))

    # def longer(i):
    #     k = np.zeros(180)
    #     j = 0
    #     p = 0
    #     while j + 1 < len(i):
    #         print([p, j])
    #         k[p] = i[j]
    #         k[p + 1] = i[j]
    #         k[p + 2] = i[j + 1]
    #         p += 3
    #         j += 2
    #     return i
    # LSTM = longer(LSTM)
    # Random = longer(Random)
    # Uniform = longer(Uniform)
    # plt.plot(LSTM, label='LSTM')
    # plt.plot(Random, label='Random')
    # plt.plot(Uniform, label='Uniform')
    plt.legend()
    plt.savefig(PATH_prefix + 'backtest_with_hand.png')
    plt.show()