Example #1
def useAgent(symbol, timeframe, terminal, dataUpdater, dataManager,
             hkFeatList, saveDir, genPath, agentName, timeConstraint):
    ###############################
    # run the trained agent on the test environment
    ###############################

    useCpu(nThreads=8, nCores=8)  # run on CPU only (project helper; 8 threads / 8 cores)

    # one copy of the fitted feature generator per role (opener / buyer / seller)
    openerPriceDiffGenerator = MultiScalerDiffGenerator(featureList=hkFeatList).loadGenerator(genPath)
    buyerPriceDiffGenerator = MultiScalerDiffGenerator(featureList=hkFeatList).loadGenerator(genPath)
    sellerPriceDiffGenerator = MultiScalerDiffGenerator(featureList=hkFeatList).loadGenerator(genPath)

    # inference mode: generators only transform incoming data, no further fitting
    openerPriceDiffGenerator.setFitMode(False)
    buyerPriceDiffGenerator.setFitMode(False)
    sellerPriceDiffGenerator.setFitMode(False)

    testEnv = RealCompositeEnv(symbol, timeframe, terminal, dataUpdater, dataManager,
                               openerPriceDiffGenerator, buyerPriceDiffGenerator, sellerPriceDiffGenerator,
                               startDeposit=300, lotSize=0.1, lotCoef=100000, stopType="adaptive", takeType="adaptive",
                               stoplossPuncts=100, takeprofitPuncts=200, stopPos=2, takePos=1, maxLoss=40000,
                               maxTake=40000, riskPoints=110, riskLevels=5, renderFlag=False)

    # get size of state and action from environment
    openerAgent = DQNAgent(testEnv.observation_space["opener"], testEnv.action_space["opener"].n)
    buyerAgent = DQNAgent(testEnv.observation_space["buyer"], testEnv.action_space["buyer"].n)
    sellerAgent = DQNAgent(testEnv.observation_space["seller"], testEnv.action_space["seller"].n)
    agent = CompositeAgent(openerAgent, buyerAgent, sellerAgent)
    agent = agent.load_agent(saveDir, agentName, dropSupportModel=True)
    # agent  = agent.load_agent("./", "checkpoint_composite")
    print("start using agent")

    startTime = datetime.now()
    agent.use_agent(testEnv, timeConstraint=timeConstraint)
    endTime = datetime.now()
    print("Use time: {}".format(endTime - startTime))
    reset_keras()
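
The helper above expects several live objects that the snippet never constructs. The sketch below shows one plausible way to call it; the terminal, data updater and data manager are placeholders, and the symbol, timeframe, paths, feature names and time limit are assumed values chosen only to illustrate the call shape, not taken from the project.

from datetime import timedelta

def run_live_test():
    # Placeholders: the snippet does not show how these project objects are built.
    terminal = ...      # broker terminal connector (assumption)
    dataUpdater = ...   # streams fresh bars for the symbol (assumption)
    dataManager = ...   # serves stored history to the generators (assumption)

    useAgent(symbol="EURUSD", timeframe="H1",              # assumed values
             terminal=terminal, dataUpdater=dataUpdater, dataManager=dataManager,
             hkFeatList=["open", "high", "low", "close"],  # assumed feature names
             saveDir="../models/", genPath="../models/",   # hypothetical paths
             agentName="best_composite",                   # save name seen in Example #2
             timeConstraint=timedelta(hours=8))            # assumed type; forwarded to use_agent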
Example #2
# training window: bars [-30000, -3000) of modDf (the final 3000 bars are skipped)
trainDf = modDf.tail(30000).head(27000)
trainEnv = CompositeEnv(trainDf, openerCompositeGenerator, buyerCompositeGenerator, sellerCompositeGenerator,
                        startDeposit=300, lotSize=0.1, lotCoef=100000, spread=18, spreadCoef=0.00001,
                        stopType="const", takeType="const", stopPos=2, takePos=1, maxLoss=20000, maxTake=20000,
                        stoplossPuncts=20000, takeprofitPuncts=20000, riskPoints=110, riskLevels=5, parallelOpener=False,
                        renderFlag=True, renderDir="../models/", renderName="train_plot")
# back-test window: the 3000 bars immediately before the training window
backTestDf = modDf.tail(33000).head(3000)
#backTestDf = modDf.head(50000).tail(3192).head(1192)
backTestEnv = CompositeEnv(backTestDf, openerCompositeGenerator, buyerCompositeGenerator, sellerCompositeGenerator,
                           startDeposit=300, lotSize=0.1, lotCoef=100000, spread=18, spreadCoef=0.00001,
                           stopType="adaptive", takeType="adaptive", stopPos=1, takePos=1, maxLoss=20000, maxTake=20000,
                           stoplossPuncts=20000, takeprofitPuncts=20000, riskPoints=110, riskLevels=5, parallelOpener=False,
                           renderDir="../models/", renderName="back_plot")
# get size of state and action from environment
openerAgent = DQNAgent(trainEnv.observation_space["opener"], trainEnv.action_space["opener"].n,
                       memorySize=40000, batch_size=20, train_start=20000, epsilon_min=0.2,
                       epsilon=1, discount_factor=0.99, epsilon_decay=0.9999, learning_rate=0.0001)
buyerAgent = DQNAgent(trainEnv.observation_space["buyer"], trainEnv.action_space["buyer"].n,
                      memorySize=40000, batch_size=20, train_start=10000, epsilon_min=0.2,
                      epsilon=1, discount_factor=0.99, epsilon_decay=0.9999, learning_rate=0.0001)
sellerAgent = DQNAgent(trainEnv.observation_space["seller"], trainEnv.action_space["seller"].n,
                       memorySize=40000, batch_size=20, train_start=10000, epsilon_min=0.2,
                       epsilon=1, discount_factor=0.99, epsilon_decay=0.9999, learning_rate=0.0001)

agent = CompositeAgent(openerAgent, buyerAgent, sellerAgent)
###################################
#agent  = agent.load_agent("../models/", "best_composite")
#agent.agents["opener"].epsilon_min = 0.1
#agent.agents["buy"].epsilon_min = 0.1
#agent.agents["sell"].epsilon_min = 0.1
#agent  = agent.load_agent("../models/", "checkpoint_composite")
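
The two slices at the top of this example are easy to misread: tail(30000).head(27000) keeps bars [-30000, -3000), while tail(33000).head(3000) keeps the 3000 bars immediately before that window, and the final 3000 bars of modDf go unused. A self-contained sketch of the same slicing pattern at 1/1000 scale on toy data:

import pandas as pd

df = pd.DataFrame({"close": range(100)})  # stand-in for modDf, 100 rows

train = df.tail(30).head(27)     # mirrors tail(30000).head(27000)
backtest = df.tail(33).head(3)   # mirrors tail(33000).head(3000)

print(train.index[0], train.index[-1])        # 70 96 -> training window
print(backtest.index[0], backtest.index[-1])  # 67 69 -> back-test precedes training
print(df.index[-3:].tolist())                 # [97, 98, 99] stay unused (held out)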
Example #3
                       takeType="adaptive",
                       stopPos=1,
                       takePos=1,
                       maxLoss=20000,
                       maxTake=20000,
                       stoplossPuncts=2000,
                       takeprofitPuncts=2000,
                       riskPoints=110,
                       riskLevels=5,
                       parallelOpener=False,
                       renderDir="../models/",
                       renderName="test_plot")

# get size of state and action from environment
openerAgent = DQNAgent(testEnv.observation_space["opener"],
                       testEnv.action_space["opener"].n,
                       fillMemoryByPretrainedModel=True)
buyerAgent = DQNAgent(testEnv.observation_space["buyer"],
                      testEnv.action_space["buyer"].n,
                      fillMemoryByPretrainedModel=True)
sellerAgent = DQNAgent(testEnv.observation_space["seller"],
                       testEnv.action_space["seller"].n,
                       fillMemoryByPretrainedModel=True)
#buyerAgent = AlwaysHoldAgent(testEnv.observation_space["buyer"], testEnv.action_space["buyer"].n, agentName="buyer")
#sellerAgent = AlwaysHoldAgent(testEnv.observation_space["seller"], testEnv.action_space["seller"].n, agentName="seller")
agent = CompositeAgent(openerAgent, buyerAgent, sellerAgent)
#agent = agent.load_agent("../models/", "best_composite", dropSupportModel=True)
#agent = agent.loadPretrainedAgents(dir="../models/", baseName="super_resnet_34_{}_{}".format(symbol, timeframe))
agent = agent.loadPretrainedAgents(dir="../models/", baseName="qrl_resnet_34")
#agent  = agent.load_agent("../models/", "checkpoint_composite")
print("start using agent")