def test_ratio_portfolio(self):
    """Backtest a MAC strategy on a plain Portfolio and verify rebalancing.

    NOTE(review): this method shares its name with another
    test_ratio_portfolio in this file — if both live in the same TestCase
    the earlier one is shadowed; confirm and rename one of them.
    """
    start = '2018-01-14 00:00:00'
    end = '2018-01-15 00:00:00'
    strategy = TestMACStrategy()
    runner = backtest(start, end, strategy=strategy, portfolio_cls=Portfolio)

    # Rebalance check: each recorded position should equal the prior
    # position plus the action the strategy requested for that symbol.
    positions_list = runner.positions_list
    expected_list = []
    for step, actions in enumerate(strategy.actions_list):
        previous = positions_list[step]
        expected = {
            symbol: (actions[symbol] + previous[symbol])
            if symbol in actions else previous[symbol]
            for symbol in previous
        }
        expected_list.append(expected)
    for actual, expected in tqdm(zip(positions_list, expected_list)):
        for symbol in actual:
            self.assertAlmostEqual(actual[symbol], expected[symbol])

    # Portfolio-value check: the equity curve must not be flat, i.e. the
    # total value actually gets updated over the run.
    totals = runner.equity_curve["total"].values
    max_deviation = np.max(np.abs(totals - np.mean(totals)))
    self.assertNotAlmostEqual(max_deviation, 0)
def test_ratio_portfolio(self):
    """Backtest a random-ratio strategy on a RatioPortfolio.

    Verifies that the weights the portfolio produced match the weights the
    strategy requested, and that the portfolio value is updated.
    """
    start = '2018-01-14 00:00:00'
    end = '2018-01-15 00:00:00'
    runner = backtest(start, end, strategy=TestRandomRatioStrategy(),
                      portfolio_cls=RatioPortfolio)

    # Rebalance check: produced weights must equal the strategy's actions.
    for produced, requested in tqdm(
            zip(runner.weights_list, runner.strategy.actions_list)):
        np.testing.assert_array_almost_equal(produced, requested)

    # Portfolio-value check: the equity curve must not be flat.
    totals = runner.equity_curve["total"].values
    max_deviation = np.max(np.abs(totals - np.mean(totals)))
    self.assertNotAlmostEqual(max_deviation, 0)
# --- Evaluate neural-network predictions on train / valid / test splits ---
# NOTE(review): 'nn_predictions_binary' is read from X_train_df / X_valid_df
# here but only (re)assigned further below — presumably these columns were
# already populated earlier in the script; TODO confirm upstream.
# y_train_pred, y_test_pred and y_pred_binary are assumed to be defined
# earlier (NN probabilities and thresholded labels) — verify against caller.
y_pred_train_binary = X_train_df['nn_predictions_binary'].values
y_valid_pred = X_valid_df['nn_predictions_prob'].values
y_pred_valid_binary = X_valid_df['nn_predictions_binary'].values

# Accuracy of the thresholded predictions against ground-truth labels.
nn_train_acc = accuracy_score(y_train_df.values, y_pred_train_binary)
nn_valid_acc = accuracy_score(y_valid_df.values, y_pred_valid_binary)
nn_test_acc = accuracy_score(y_test_df.values, y_pred_binary)
print('Train Accuracy is {} ; Error is {}'.format(nn_train_acc, 1 - nn_train_acc))
print('Valid Accuracy is {} ; Error is {}'.format(nn_valid_acc, 1 - nn_valid_acc))
print('Test Accuracy is {} ; Error is {}'.format(nn_test_acc, 1 - nn_test_acc))

# Backtest the NN's test-set predictions; presumably returns per-bet profit
# and a cumulative profit series — see utils.backtest for exact semantics.
profit, nn_cum_profit = utils.backtest(y_pred_binary, y_test_pred, 'nn_model_player')

# Write probabilities and binary labels back onto the feature frames so the
# gradient-boosting stage below can consume them as extra features.
X_train_df['nn_predictions_prob'] = y_train_pred
X_train_df['nn_predictions_binary'] = y_pred_train_binary
X_valid_df['nn_predictions_prob'] = y_valid_pred
X_valid_df['nn_predictions_binary'] = y_pred_valid_binary
X_test_df['nn_predictions_prob'] = y_test_pred
X_test_df['nn_predictions_binary'] = y_pred_binary

# train gradient boosting
# LightGBM-style hyperparameters for the binary gradient-boosting model.
params_1 = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'learning_rate': 0.01,
    'verbosity': 0
}
def backtest(start_date='2019-04-23'):
    """Fetch WSB and SPY data from *start_date* onward and backtest them.

    Delegates the actual backtest to utils.backtest; returns whatever it
    returns.
    """
    spy_df = get_spy_data(start_date=start_date)
    wsb_df = get_wsb_data(start_date=start_date)
    return utils.backtest(wsb_df=wsb_df, spy_df=spy_df)
# Interior of the LSTM training loop (fragment: the loop header precedes
# this chunk and the final save_checkpoint call is truncated — it continues
# past the end of this view).
optimizer.step()
# Every 100 iterations: plot the training fit, evaluate on the validation
# loader, and checkpoint when backtest profit improves.
if i % 100 == 0:
    plt.plot(pred_train)
    plt.plot(target_train)
    # plt.show()
    loss_val, pred_val, target_val = evaluate_lstm(
        dataloader=valloader, model=seq, criterion=criterion)
    plt.scatter(range(len(pred_val)), pred_val)
    plt.scatter(range(len(pred_val)), target_val)
    # plt.show()
    # Backtest validation predictions; index appears to be a cumulative
    # profit series, so index[-1] is final profit — TODO confirm.
    index, real = backtest(pred_val, y_validate)
    print(index[-1])
    # save according to profitability
    # Only checkpoint after a 200-iteration warm-up and when the final
    # profit beats the best value seen so far.
    if index[-1] > global_profit_val and i > 200:
        print("CURRENT BEST")
        global_profit_val = index[-1]
        save_checkpoint(
            {
                'epoch': i + 1,
                'state_dict': seq.state_dict()
            },
            is_best=True,
            filename='checkpoint_lstm.pth.tar')
        save_checkpoint({
def session(config, args):
    """Run one train or test session driven by the parsed configuration.

    In 'train' mode: on a fresh run the date split and codes are derived via
    env.get_repo and persisted to result/<framework>/<num>/config.json; on a
    rerun (result dir already exists) that file is reloaded instead.  A PG or
    DDPG agent is then trained for `epochs` epochs.  In 'test' mode the saved
    test window is reloaded, the agent is rebuilt with reload=True /
    trainable=False, and backtest() is run.

    NOTE(review): indentation was reconstructed from a collapsed source —
    the inner else is taken to pair with `if not os.path.exists(path)` (the
    only parse consistent with the later `elif args.mode == 'test'`); confirm
    against the original file.
    """
    # Unpack the full run configuration in one shot.
    codes, start_date, end_date, features, agent_config, \
        market, predictor, framework, window_length, noise_flag, record_flag, \
        plot_flag, reload_flag, trainable, method, epochs = parse_config(config, args)
    env = Environment(args.seed)
    stocktrader = StockTrader()
    path = "result/{}/{}/".format(framework, args.num)
    logger.info('Mode: {}'.format(args.mode))
    if args.mode == 'train':
        if not os.path.exists(path):
            # Fresh run: create the result dir, derive the train/test date
            # split and tradable codes, load data, and persist the split so
            # a later 'test' run can reproduce it.
            os.makedirs(path)
            train_start_date, train_end_date, test_start_date, test_end_date, codes = env.get_repo(
                start_date, end_date, codes, market)
            logger.debug("Training with codes: {}".format(codes))
            env.get_data(train_start_date, train_end_date, features,
                         window_length, market, codes)
            with open(path + 'config.json', 'w') as f:
                # NOTE(review): these print() calls go to stdout, not to f —
                # they look like leftover debugging output.
                print(train_start_date)
                print(train_end_date)
                print(test_start_date)
                print(test_end_date)
                json.dump(
                    {
                        "train_start_date": train_start_date.strftime('%Y-%m-%d'),
                        "train_end_date": train_end_date.strftime('%Y-%m-%d'),
                        "test_start_date": test_start_date.strftime('%Y-%m-%d'),
                        "test_end_date": test_end_date.strftime('%Y-%m-%d'),
                        "codes": codes
                    }, f)
        else:
            # Rerun: reload the previously saved split and codes.
            with open('result/{}/{}/config.json'.format(framework, args.num),
                      'r') as f:
                dict_data = json.load(f)
            train_start_date, train_end_date, codes = datetime.strptime(
                dict_data['train_start_date'],
                '%Y-%m-%d'), datetime.strptime(dict_data['train_end_date'],
                                               '%Y-%m-%d'), dict_data['codes']
            env.get_data(train_start_date, train_end_date, features,
                         window_length, market, codes)
        # Build the RL agent; action dimension is one slot per code plus one
        # (presumably cash — confirm against the agent implementations).
        if framework == 'PG':
            logger.debug("Loading PG Agent")
            agent = PG(
                len(codes) + 1, int(window_length), len(features),
                '-'.join(agent_config), reload_flag, trainable, args.num)
        elif framework == 'DDPG':
            logger.debug("Loading DDPG Agent")
            agent = DDPG(
                len(codes) + 1, int(window_length), len(features),
                '-'.join(agent_config), reload_flag, trainable, args.num)
        logger.info("Training: %d epochs", epochs)
        for epoch in range(epochs):
            traversal(stocktrader, agent, env, epoch, True, framework, method,
                      trainable)
            if record_flag:
                stocktrader.write(epoch, framework)
            if plot_flag:
                stocktrader.plot_result()
            # Clear the replay buffer and per-epoch bookkeeping before the
            # next epoch.
            agent.reset_buffer()
            stocktrader.print_result(epoch, agent, True)
            stocktrader.reset()
        agent.close()
    elif args.mode == 'test':
        # Restore the saved test window and codes, rebuild the agent in
        # inference mode (reload=True, trainable=False), and run a backtest.
        with open("result/{}/{}/config.json".format(framework, args.num),
                  'r') as f:
            dict_data = json.load(f)
        test_start_date, test_end_date, codes = datetime.strptime(
            dict_data['test_start_date'],
            '%Y-%m-%d'), datetime.strptime(dict_data['test_end_date'],
                                           '%Y-%m-%d'), dict_data['codes']
        env.get_data(test_start_date, test_end_date, features, window_length,
                     market, codes)
        if framework == 'PG':
            logger.info("Loading PG Agent")
            agent = PG(
                len(codes) + 1, int(window_length), len(features),
                '-'.join(agent_config), True, False, args.num)
        elif framework == 'DDPG':
            logger.info("Loading DDPG Agent")
            agent = DDPG(
                len(codes) + 1, int(window_length), len(features),
                '-'.join(agent_config), True, False, args.num)
        backtest([agent], env, "result/{}/{}/".format(framework, args.num),
                 framework)