Example #1
# Assumes `os`, `mod_df` (the source DataFrame), the three feature generators,
# CompositeEnv, DQNAgent and the project's `load` helper are already in scope.
reward_transformer = load(os.path.join("../../models/reward_transformer.pkl"))

testDf = mod_df.tail(120000).head(100000)
composite_env = CompositeEnv(testDf,
                             openerPriceDiffGenerator,
                             buyer_feat_gen,
                             sellerPriceDiffGenerator,
                             startDeposit=300,
                             lot_size=0.1,
                             lot_coef=100000,
                             spread=18,
                             spread_coef=0.00001,
                             stopType="const",
                             takeType="const",
                             stopPos=2,
                             takePos=2,
                             maxLoss=20000,
                             maxTake=20000,
                             stoplossPuncts=200,
                             takeprofitPuncts=100,
                             riskPoints=110,
                             riskLevels=5,
                             parallelOpener=False,
                             renderDir="../../data/pictures",
                             renderName="test_plot",
                             env_name="test_env",
                             turn_off_spread=False)
buyer_env = composite_env.state_dict["buyer"]
openerAgent = DQNAgent("opener", composite_env.observation_space["opener"],
                       composite_env.action_space["opener"].n)
buyerAgent = DQNAgent("buyer", composite_env.observation_space["buyer"],
                      composite_env.action_space["buyer"].n)
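# The original example ends here. By the pattern above, a seller agent would
# presumably be built the same way (an assumption inferred from the three
# generators passed to CompositeEnv, not code from the original snippet):
sellerAgent = DQNAgent("seller", composite_env.observation_space["seller"],
                       composite_env.action_space["seller"].n)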
Example #2
# Assumes `os`, `numpy as np`, `mod_df`, `feat_list` and the project helpers
# (FeatGen_ScaledWindow, CompositeEnv, load/save) are already in scope.
price_diff_generator = FeatGen_ScaledWindow(feat_list, n_points=256, flat_stack=False)
# The same generator instance is reused for the opener, buyer and seller roles.
opener_price_diff_generator = price_diff_generator
buyer_price_diff_generator = price_diff_generator
seller_price_diff_generator = price_diff_generator

#reward_transformer = RewardTransformer()
#reward_transformer.fit(df, spread_coef=0.00001, lot_coef=100000, lot_size=0.1, window_width=40)
#save(reward_transformer, path=os.path.join("../../models/reward_transformer.pkl"))
reward_transformer = load(os.path.join("../../models/reward_transformer.pkl"))
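# The `save`/`load` helpers used above are not defined in this snippet.
# A minimal pickle-based pair consistent with how they are called here
# (an assumption, not the project's actual implementation):
import pickle

def save(obj, path):
    # Serialize an object (e.g. the fitted RewardTransformer) to disk.
    with open(path, "wb") as f:
        pickle.dump(obj, f)

def load(path):
    # Restore a previously serialized object.
    with open(path, "rb") as f:
        return pickle.load(f)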

test_df = mod_df.tail(380000).head(40000)
#back_test_df = mod_df.head(50000).tail(3192).head(1192)
test_env = CompositeEnv(test_df, opener_price_diff_generator, buyer_price_diff_generator, seller_price_diff_generator,
                        start_deposit=300, lot_size=0.1, lot_coef=100000, spread=18, spread_coef=0.00001,
                        stop_type="const", take_type="const", stop_pos=2, take_pos=2, max_loss=20000, max_take=20000,
                        stoploss_puncts=2000, takeprofit_puncts=2000, risk_points=110, risk_levels=5, parallel_opener=False,
                        render_dir="../../data/pictures", render_name="test_plot", env_name="test_env", turn_off_spread=False)

# Evaluate 10 saved agent checkpoints: each is run 10 times on the test
# environment, a run is scored by its median deal, and the checkpoint's
# score is the mean of those medians.
scores = []
for i in range(10):
    agent = load(os.path.join("../../models/", "composite_agent_{}.pkl".format(i)))
    agent_scores = []
    for j in range(10):
        score, deals_statistics = agent.use_agent(test_env, render_deals=False)
        median_deal = np.median(deals_statistics)
        agent_scores.append(median_deal)
    agent_mean_score = np.mean(agent_scores)
    scores.append(agent_mean_score)

print( "Scores: {}".format( scores ) )
Example #3
# Assumes the same context as the examples above: `os`, `mod_df`, the feature
# generators and the project's `load` helper are in scope.
reward_transformer = load(os.path.join("../../models/reward_transformer.pkl"))

trainDf = mod_df.tail(120000).head(100000)
trainEnv = CompositeEnv(trainDf,
                        openerPriceDiffGenerator,
                        buyerPriceDiffGenerator,
                        sellerPriceDiffGenerator,
                        startDeposit=300,
                        lot_size=0.1,
                        lot_coef=100000,
                        spread=18,
                        spread_coef=0.00001,
                        stopType="const",
                        takeType="const",
                        stopPos=2,
                        takePos=1,
                        maxLoss=20000,
                        maxTake=20000,
                        stoplossPuncts=200,
                        takeprofitPuncts=100,
                        riskPoints=110,
                        riskLevels=5,
                        parallelOpener=False,
                        renderFlag=True,
                        renderDir="../../data/pictures",
                        renderName="train_plot",
                        env_name="train_env",
                        turn_off_spread=True)
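# Note: turn_off_spread=True disables the bid/ask spread during training here;
# the evaluation environments in the examples above keep it on
# (turn_off_spread=False).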

backTestDf = mod_df.tail(140000).head(20000)
#backTestDf = modDf.head(50000).tail(3192).head(1192)
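# Window arithmetic (with N = len(mod_df)): trainDf covers rows
# [N-120000, N-20000) and backTestDf covers rows [N-140000, N-120000),
# i.e. the 20,000 rows immediately preceding the training window, so the two
# slices never overlap. A hypothetical helper (not in the original code)
# makes the pattern explicit:
def window(df, offset_from_end, size):
    # Take `size` rows starting `offset_from_end` rows before the end of `df`.
    return df.tail(offset_from_end).head(size)

# trainDf    = window(mod_df, 120000, 100000)
# backTestDf = window(mod_df, 140000, 20000)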